//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memtag.h"
#include "stack_depot.h"
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"
#include "condition_variable.h"
#include "mem_map.h"
#include "size_class_map.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android. Without this, we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUp(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}
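
// For illustration (assuming a typical 64-bit target where
// SCUDO_MIN_ALIGNMENT_LOG is 4): a request of Size = 100 with Alignment = 1
// needs roundUp(100, 16) == 112 bytes plus the chunk header, and is treated as
// a primary allocation as long as that total fits in the primary's largest
// size class.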

template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUp(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 'A';
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 'A';
        },
        "");
  }
}

template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

  void *operator new(size_t size);
  void operator delete(void *ptr);
};

constexpr size_t kMaxAlign = std::max({
    alignof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
    alignof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
    alignof(scudo::Allocator<scudo::AndroidConfig>)
});

#if SCUDO_RISCV64
// The allocator is over 4MB large. Rather than creating an instance of this on
// the heap, keep it in a global storage to reduce fragmentation from having to
// mmap this at the start of every test.
struct TestAllocatorStorage {
  static constexpr size_t kMaxSize = std::max({
      sizeof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
      sizeof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
      sizeof(scudo::Allocator<scudo::AndroidConfig>)
  });

  // The storage is guarded by M, which is acquired in get() and released in
  // release(); the static thread-safety analysis can't follow a lock held
  // across two functions, so skip it here.
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    CHECK(size <= kMaxSize &&
          "Allocation size doesn't fit in the allocator storage");
    M.lock();
    return AllocatorStorage;
  }

  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS {
    M.assertHeld();
    M.unlock();
    ASSERT_EQ(ptr, AllocatorStorage);
  }

  static scudo::HybridMutex M;
  static uint8_t AllocatorStorage[kMaxSize];
};
scudo::HybridMutex TestAllocatorStorage::M;
alignas(kMaxAlign) uint8_t TestAllocatorStorage::AllocatorStorage[kMaxSize];
#else
struct TestAllocatorStorage {
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, kMaxAlign, size));
    return p;
  }
  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS { free(ptr); }
};
#endif

template <typename Config>
void *TestAllocator<Config>::operator new(size_t size) {
  return TestAllocatorStorage::get(size);
}

template <typename Config>
void TestAllocator<Config>::operator delete(void *ptr) {
  TestAllocatorStorage::release(ptr);
}
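
// Because of the class-specific operator new/delete above, the
// `std::make_unique<AllocatorT>()` call in the fixture below places the
// allocator in TestAllocatorStorage instead of going through the default heap.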

template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
    UseQuarantine = true;
  }

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};

template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

namespace scudo {
struct TestConditionVariableConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A>
  using TSDRegistryT =
      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.

  struct Primary {
    using SizeClassMap = scudo::AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
    static const scudo::uptr RegionSizeLog = 28U;
    typedef scudo::u32 CompactPtrT;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 20U;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
#else
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::uptr CompactPtrT;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
#if SCUDO_CAN_USE_PRIMARY64
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;
#else
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator32<Config>;
#endif

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};
} // namespace scudo

#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    using BaseT = FIXTURE<TypeParam>;                                          \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()

// Accessing `TSD->getCache()` requires `TSD::Mutex`, which isn't easy to
// verify with the static thread-safety analysis. Instead, thread safety is
// checked at runtime in ScopedTSD, and the test body is marked with
// NO_THREAD_SAFETY_ANALYSIS.
#define SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(FIXTURE, NAME)                     \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    using BaseT = FIXTURE<TypeParam>;                                          \
    void Run() NO_THREAD_SAFETY_ANALYSIS;                                      \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
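
// For reference, SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) expands (on
// non-Fuchsia targets) to roughly:
//
//   template <class TypeParam>
//   struct ScudoCombinedTestIsOwned : public ScudoCombinedTest<TypeParam> {
//     using BaseT = ScudoCombinedTest<TypeParam>;
//     void Run();
//   };
//   using ScudoCombinedTestIsOwned_DefaultConfig =
//       ScudoCombinedTestIsOwned<scudo::DefaultConfig>;
//   TEST_F(ScudoCombinedTestIsOwned_DefaultConfig, IsOwned) {
//     ScudoCombinedTestIsOwned<scudo::DefaultConfig>::Run();
//   }
//   // ... likewise for AndroidConfig and TestConditionVariableConfig ...
//   template <class TypeParam> void ScudoCombinedTestIsOwned<TypeParam>::Run()
//
// with the braced test body that follows the macro becoming the body of Run().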

SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (plus or minus a small delta of a power of two for example).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if ((1LL << SizeLog) + Delta < 0)
        continue;
      const scudo::uptr Size =
          static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }

  Allocator->printStats();
  Allocator->printFragmentationInfo();
}

#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
    this->BasicTest(SizeLog);                                                  \
  }

SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)
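
// Together these instantiations cover allocation sizes from about 1 byte up
// to roughly 1 MiB (2^20), each exercised by BasicTest above at alignments
// ranging from the platform minimum up to 64 KiB.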

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zero'd out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern-filled or
  // zero-filled block. The primary allocator only produces pattern-filled
  // blocks if MTE is disabled, so we only require pattern-filled blocks in
  // that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying
  // that we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr InitialSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  const char Marker = 'A';
  Allocator->setFillContents(scudo::PatternOrZeroFill);

  void *P = Allocator->allocate(InitialSize, Origin);
  scudo::uptr CurrentSize = InitialSize;
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    memset(P, Marker, CurrentSize);
    const scudo::uptr NewSize =
        static_cast<scudo::uptr>(static_cast<scudo::sptr>(InitialSize) + Delta);
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);

    // Verify that existing contents have been preserved.
    for (scudo::uptr I = 0; I < scudo::Min(CurrentSize, NewSize); I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);

    // Verify that new bytes are set according to FillContentsMode.
    for (scudo::uptr I = CurrentSize; I < NewSize; I++) {
      unsigned char V = (reinterpret_cast<unsigned char *>(NewP))[I];
      EXPECT_TRUE(V == scudo::PatternFillByte || V == 0);
    }

    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
    CurrentSize = NewSize;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocate a bunch of chunks, then iterate over all the chunks, ensuring
  // they are the ones we allocated. This requires the allocator to not have
  // any other allocated chunk at this point (e.g. it won't work with the
  // Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. The internals
  // of iterateOverChunks read headers through both tagged and untagged
  // pointers, so one of them will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          static_cast<scudo::uptr>(std::rand()) %
              (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
          Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, UNUSED size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 'A';
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 'A';
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DoubleFreeFromPrimary) {
  auto *Allocator = this->Allocator.get();

  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!isPrimaryAllocation<TestAllocator<TypeParam>>(Size, 0))
      break;

    // Verify that a double free results in a chunk state error.
    EXPECT_DEATH(
        {
          // Allocate from primary
          void *P = Allocator->allocate(Size, Origin);
          ASSERT_TRUE(P != nullptr);
          Allocator->deallocate(P, Origin);
          Allocator->deallocate(P, Origin);
        },
        "invalid chunk state");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 'A', "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}

SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, CacheDrain) {
  using AllocatorT = typename BaseT::AllocatorT;
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  EXPECT_TRUE(!TSD->getCache().isEmpty());
  TSD->getCache().drain();
  EXPECT_TRUE(TSD->getCache().isEmpty());
}

SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, ForceCacheDrain) {
  using AllocatorT = typename BaseT::AllocatorT;
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  // `ForceAll` will also drain the caches.
  Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);

  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  EXPECT_TRUE(TSD->getCache().isEmpty());
  EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
  EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }

      // Try to interleave pushBlocks(), popBatch() and releaseToOS().
      Allocator->releaseToOS(scudo::ReleaseToOS::Force);

      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::ReservedMemoryT ReservedMemory;
  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, "testSEGV"));
  void *P = reinterpret_cast<void *>(ReservedMemory.getBase());
  ASSERT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  ReservedMemory.release();
}

struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u16 MaxNumCachedHint = 8;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};

static const scudo::uptr DeathRegionSizeLog = 21U;
struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;
  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;

  struct Primary {
    // Tiny allocator, its Primary only serves chunks of four sizes.
    using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
    static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const scudo::uptr GroupSizeLog = 18;
  };
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };

  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};
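
// With DeathSizeClassConfig above (NumBits = 1, MinSizeLog = MidSizeLog = 10,
// MaxSizeLog = 13, SizeDelta = 0), the "four sizes" served by the Primary are
// 1024, 2048, 4096 and 8192 bytes, each backed by a 2MB (1 << 21) region.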

TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::Primary::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        DeathConfig::Primary::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::Primary::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();
  Allocator->setOption(scudo::Option::MemtagTuning,
                       M_MEMTAG_TUNING_BUFFER_OVERFLOW);

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}
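
// With M_MEMTAG_TUNING_BUFFER_OVERFLOW, adjacent chunks of a size class are
// expected to carry tags of opposite parity (checked above via Tag1 % 2 !=
// Tag2 % 2), so a linear overflow from one chunk into its neighbor always
// changes tags and traps.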

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that if mem-init is disabled on a thread, calloc should still work as
  // expected. This is tricky to ensure when MTE is enabled, so this test tries
  // to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], '\0');
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
  auto *Allocator = this->Allocator.get();

  // Regression test: make realloc-in-place happen at the very right end of a
  // mapped region.
  constexpr size_t nPtrs = 10000;
  for (scudo::uptr i = 1; i < 32; ++i) {
    scudo::uptr Size = 16 * i - 1;
    std::vector<void *> Ptrs;
    for (size_t i = 0; i < nPtrs; ++i) {
      void *P = Allocator->allocate(Size, Origin);
      P = Allocator->reallocate(P, Size + 1);
      Ptrs.push_back(P);
    }

    for (size_t i = 0; i < nPtrs; ++i)
      Allocator->deallocate(Ptrs[i], Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferDefaultDisabled) {
  // The RingBuffer is not initialized until tracking is enabled for the
  // first time.
  auto *Allocator = this->Allocator.get();
  EXPECT_EQ(0u, Allocator->getRingBufferSize());
  EXPECT_EQ(nullptr, Allocator->getRingBufferAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferInitOnce) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto RingBufferSize = Allocator->getRingBufferSize();
  ASSERT_GT(RingBufferSize, 0u);
  auto *RingBufferAddress = Allocator->getRingBufferAddress();
  EXPECT_NE(nullptr, RingBufferAddress);

  // Enable tracking again to verify that the initialization only happens once.
  Allocator->setTrackAllocationStacks(true);
  ASSERT_EQ(RingBufferSize, Allocator->getRingBufferSize());
  EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto RingBufferSize = Allocator->getRingBufferSize();
  ASSERT_GT(RingBufferSize, 0u);
  EXPECT_EQ(Allocator->getRingBufferAddress()[RingBufferSize - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto *RingBufferAddress = Allocator->getRingBufferAddress();
  EXPECT_NE(RingBufferAddress, nullptr);
  EXPECT_EQ(RingBufferAddress, Allocator->getRingBufferAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotDefaultDisabled) {
  // The StackDepot is not initialized until tracking is enabled for the
  // first time.
  auto *Allocator = this->Allocator.get();
  EXPECT_EQ(0u, Allocator->getStackDepotSize());
  EXPECT_EQ(nullptr, Allocator->getStackDepotAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotInitOnce) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto StackDepotSize = Allocator->getStackDepotSize();
  EXPECT_GT(StackDepotSize, 0u);
  auto *StackDepotAddress = Allocator->getStackDepotAddress();
  EXPECT_NE(nullptr, StackDepotAddress);

  // Enable tracking again to verify that the initialization only happens once.
  Allocator->setTrackAllocationStacks(true);
  EXPECT_EQ(StackDepotSize, Allocator->getStackDepotSize());
  EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotSize) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto StackDepotSize = Allocator->getStackDepotSize();
  EXPECT_GT(StackDepotSize, 0u);
  EXPECT_EQ(Allocator->getStackDepotAddress()[StackDepotSize - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepotAddress) {
  auto *Allocator = this->Allocator.get();
  Allocator->setTrackAllocationStacks(true);

  auto *StackDepotAddress = Allocator->getStackDepotAddress();
  EXPECT_NE(StackDepotAddress, nullptr);
  EXPECT_EQ(StackDepotAddress, Allocator->getStackDepotAddress());
}

SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) {
  alignas(scudo::StackDepot) char Buf[sizeof(scudo::StackDepot) +
                                      1024 * sizeof(scudo::atomic_u64) +
                                      1024 * sizeof(scudo::atomic_u32)] = {};
  auto *Depot = reinterpret_cast<scudo::StackDepot *>(Buf);
  Depot->init(1024, 1024);
  ASSERT_TRUE(Depot->isValid(sizeof(Buf)));
  ASSERT_FALSE(Depot->isValid(sizeof(Buf) - 1));
  scudo::uptr Stack[] = {1, 2, 3};
  scudo::u32 Elem = Depot->insert(&Stack[0], &Stack[3]);
  scudo::uptr RingPosPtr = 0;
  scudo::uptr SizePtr = 0;
  ASSERT_TRUE(Depot->find(Elem, &RingPosPtr, &SizePtr));
  ASSERT_EQ(SizePtr, 3u);
  EXPECT_EQ(Depot->at(RingPosPtr), 1u);
  EXPECT_EQ(Depot->at(RingPosPtr + 1), 2u);
  EXPECT_EQ(Depot->at(RingPosPtr + 2), 3u);
}
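
// Buf above is sized exactly for init(1024, 1024): the StackDepot header plus
// a 1024-entry atomic_u64 ring and a 1024-entry atomic_u32 table, which is why
// isValid() accepts sizeof(Buf) and rejects sizeof(Buf) - 1.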

#if SCUDO_CAN_USE_PRIMARY64
#if SCUDO_TRUSTY

// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// which covers only simple operations and ensures the configuration is able to
// compile.
TEST(ScudoCombinedTest, BasicTrustyConfig) {
  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  for (scudo::uptr ClassId = 1U;
       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
    ASSERT_NE(p, nullptr);
    Allocator->deallocate(p, Origin);
  }

  bool UnlockRequired;
  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  TSD->getCache().drain();

  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

#endif
#endif