1 //===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
11 #include "scudo/interface.h"
12 #include "tests/scudo_unit_test.h"
// Provide a fallback so the __GLIBC_PREREQ(2, 33) check below compiles on
// non-glibc platforms (it then simply evaluates to 0).
#ifndef __GLIBC_PREREQ
#define __GLIBC_PREREQ(x, y) 0
#endif

// Feature flags for the non-standard allocation entry points exercised by
// the OtherAlloc test. NOTE(review): the platform-selection skeleton was
// truncated in this copy; reconstructed from the surviving comments.
#if SCUDO_FUCHSIA
// Fuchsia only has valloc
#define HAVE_VALLOC 1
#elif SCUDO_ANDROID
// Android only has pvalloc/valloc on 32 bit
#if !defined(__LP64__)
#define HAVE_PVALLOC 1
#define HAVE_VALLOC 1
#endif // !defined(__LP64__)
#else
// All others assumed to support both functions.
#define HAVE_PVALLOC 1
#define HAVE_VALLOC 1
#endif
// Declarations for the non-standard allocator-control entry points that the
// scudo C wrappers export and the tests below call directly.
void malloc_enable(void);
void malloc_disable(void);
// Walks the allocator's live chunks in [base, base + size) and invokes
// `callback` once per chunk. The declaration was truncated in this copy; the
// trailing `void *arg` parameter is implied by the `),` continuation and by
// the four-argument calls in MallocIterateBoundary.
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);
// Hook verification is opt-in; default it off when the build doesn't define it.
#ifndef SCUDO_ENABLE_HOOKS_TESTS
#define SCUDO_ENABLE_HOOKS_TESTS 0
#endif

// The hook tests can only observe anything if the allocator was built with
// hooks enabled; fail the build loudly on an inconsistent configuration.
#if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
#error "Hooks tests should have hooks enabled as well!"
#endif
// Records captured by the __scudo_*_hook callbacks below and checked by the
// verify* helpers in ScudoWrappersCTest. Member lists were lost in this copy
// and are reconstructed from their uses (AC.Ptr/AC.Size, DC.Ptr,
// RC.AllocPtr/RC.DeallocPtr/RC.Size).
struct AllocContext {
  void *Ptr;
  size_t Size;
};
struct DeallocContext {
  void *Ptr;
};
struct ReallocContext {
  void *AllocPtr;
  void *DeallocPtr;
  size_t Size;
};
static AllocContext AC;
static DeallocContext DC;
static ReallocContext RC;
73 #if (SCUDO_ENABLE_HOOKS_TESTS == 1)
74 __attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr
,
79 __attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr
) {
82 __attribute__((visibility("default"))) void
83 __scudo_realloc_allocate_hook(void *OldPtr
, void *NewPtr
, size_t Size
) {
84 // Verify that __scudo_realloc_deallocate_hook is called first and set the
86 EXPECT_EQ(OldPtr
, RC
.DeallocPtr
);
90 // Note that this is only used for testing. In general, only one pair of hooks
91 // will be invoked in `realloc`. if __scudo_realloc_*_hook are not defined,
92 // it'll call the general hooks only. To make the test easier, we call the
93 // general one here so that either case (whether __scudo_realloc_*_hook are
94 // defined) will be verified without separating them into different tests.
95 __scudo_allocate_hook(NewPtr
, Size
);
97 __attribute__((visibility("default"))) void
98 __scudo_realloc_deallocate_hook(void *Ptr
) {
101 // See the comment in the __scudo_realloc_allocate_hook above.
102 __scudo_deallocate_hook(Ptr
);
104 #endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
107 class ScudoWrappersCTest
: public Test
{
109 void SetUp() override
{
110 if (SCUDO_ENABLE_HOOKS
&& !SCUDO_ENABLE_HOOKS_TESTS
)
111 printf("Hooks are enabled but hooks tests are disabled.\n");
114 void invalidateHookPtrs() {
115 if (SCUDO_ENABLE_HOOKS_TESTS
) {
116 void *InvalidPtr
= reinterpret_cast<void *>(0xdeadbeef);
119 RC
.AllocPtr
= RC
.DeallocPtr
= InvalidPtr
;
122 void verifyAllocHookPtr(UNUSED
void *Ptr
) {
123 if (SCUDO_ENABLE_HOOKS_TESTS
)
124 EXPECT_EQ(Ptr
, AC
.Ptr
);
126 void verifyAllocHookSize(UNUSED
size_t Size
) {
127 if (SCUDO_ENABLE_HOOKS_TESTS
)
128 EXPECT_EQ(Size
, AC
.Size
);
130 void verifyDeallocHookPtr(UNUSED
void *Ptr
) {
131 if (SCUDO_ENABLE_HOOKS_TESTS
)
132 EXPECT_EQ(Ptr
, DC
.Ptr
);
134 void verifyReallocHookPtrs(UNUSED
void *OldPtr
, void *NewPtr
, size_t Size
) {
135 if (SCUDO_ENABLE_HOOKS_TESTS
) {
136 EXPECT_EQ(OldPtr
, RC
.DeallocPtr
);
137 EXPECT_EQ(NewPtr
, RC
.AllocPtr
);
138 EXPECT_EQ(Size
, RC
.Size
);
142 using ScudoWrappersCDeathTest
= ScudoWrappersCTest
;
144 // Note that every C allocation function in the test binary will be fulfilled
145 // by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
146 // But this might also lead to unexpected side-effects, since the allocation and
147 // deallocation operations in the TEST functions will coexist with others (see
148 // the EXPECT_DEATH comment below).
150 // We have to use a small quarantine to make sure that our double-free tests
151 // trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was just
152 // freed (this depends on the size obviously) and the following free succeeds.
// Default allocation size used throughout the tests; small enough to land in
// the quarantine-sized classes the double-free tests rely on.
static const size_t Size = 100U;
156 TEST_F(ScudoWrappersCDeathTest
, Malloc
) {
157 void *P
= malloc(Size
);
158 EXPECT_NE(P
, nullptr);
159 EXPECT_LE(Size
, malloc_usable_size(P
));
160 EXPECT_EQ(reinterpret_cast<uintptr_t>(P
) % FIRST_32_SECOND_64(8U, 16U), 0U);
161 verifyAllocHookPtr(P
);
162 verifyAllocHookSize(Size
);
164 // An update to this warning in Clang now triggers in this line, but it's ok
165 // because the check is expecting a bad pointer and should fail.
166 #if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
167 #pragma GCC diagnostic push
168 #pragma GCC diagnostic ignored "-Wfree-nonheap-object"
171 free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P
) | 1U)), "");
172 #if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
173 #pragma GCC diagnostic pop
177 verifyDeallocHookPtr(P
);
178 EXPECT_DEATH(free(P
), "");
181 EXPECT_NE(P
, nullptr);
185 EXPECT_EQ(malloc(SIZE_MAX
), nullptr);
186 EXPECT_EQ(errno
, ENOMEM
);
189 TEST_F(ScudoWrappersCTest
, Calloc
) {
190 void *P
= calloc(1U, Size
);
191 EXPECT_NE(P
, nullptr);
192 EXPECT_LE(Size
, malloc_usable_size(P
));
193 verifyAllocHookPtr(P
);
194 verifyAllocHookSize(Size
);
195 for (size_t I
= 0; I
< Size
; I
++)
196 EXPECT_EQ((reinterpret_cast<uint8_t *>(P
))[I
], 0U);
198 verifyDeallocHookPtr(P
);
201 EXPECT_NE(P
, nullptr);
204 EXPECT_NE(P
, nullptr);
208 EXPECT_EQ(calloc(SIZE_MAX
, 1U), nullptr);
209 EXPECT_EQ(errno
, ENOMEM
);
211 EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX
) + 1U, 2U), nullptr);
213 EXPECT_EQ(errno
, ENOMEM
);
215 EXPECT_EQ(calloc(SIZE_MAX
, SIZE_MAX
), nullptr);
216 EXPECT_EQ(errno
, ENOMEM
);
219 TEST_F(ScudoWrappersCTest
, SmallAlign
) {
220 // Allocating pointers by the powers of 2 from 1 to 0x10000
221 // Using powers of 2 due to memalign using powers of 2 and test more sizes
222 constexpr size_t MaxSize
= 0x10000;
223 std::vector
<void *> ptrs
;
224 // Reserving space to prevent further allocation during the test
225 ptrs
.reserve((scudo::getLeastSignificantSetBitIndex(MaxSize
) + 1) *
226 (scudo::getLeastSignificantSetBitIndex(MaxSize
) + 1) * 3);
227 for (size_t Size
= 1; Size
<= MaxSize
; Size
<<= 1) {
228 for (size_t Align
= 1; Align
<= MaxSize
; Align
<<= 1) {
229 for (size_t Count
= 0; Count
< 3; ++Count
) {
230 void *P
= memalign(Align
, Size
);
231 EXPECT_TRUE(reinterpret_cast<uintptr_t>(P
) % Align
== 0);
236 for (void *ptr
: ptrs
)
240 TEST_F(ScudoWrappersCTest
, Memalign
) {
242 for (size_t I
= FIRST_32_SECOND_64(2U, 3U); I
<= 18U; I
++) {
243 const size_t Alignment
= 1U << I
;
245 P
= memalign(Alignment
, Size
);
246 EXPECT_NE(P
, nullptr);
247 EXPECT_LE(Size
, malloc_usable_size(P
));
248 EXPECT_EQ(reinterpret_cast<uintptr_t>(P
) % Alignment
, 0U);
249 verifyAllocHookPtr(P
);
250 verifyAllocHookSize(Size
);
252 verifyDeallocHookPtr(P
);
255 EXPECT_EQ(posix_memalign(&P
, Alignment
, Size
), 0);
256 EXPECT_NE(P
, nullptr);
257 EXPECT_LE(Size
, malloc_usable_size(P
));
258 EXPECT_EQ(reinterpret_cast<uintptr_t>(P
) % Alignment
, 0U);
259 verifyAllocHookPtr(P
);
260 verifyAllocHookSize(Size
);
262 verifyDeallocHookPtr(P
);
265 EXPECT_EQ(memalign(4096U, SIZE_MAX
), nullptr);
266 EXPECT_EQ(posix_memalign(&P
, 15U, Size
), EINVAL
);
267 EXPECT_EQ(posix_memalign(&P
, 4096U, SIZE_MAX
), ENOMEM
);
269 // Android's memalign accepts non power-of-2 alignments, and 0.
271 for (size_t Alignment
= 0U; Alignment
<= 128U; Alignment
++) {
272 P
= memalign(Alignment
, 1024U);
273 EXPECT_NE(P
, nullptr);
274 verifyAllocHookPtr(P
);
275 verifyAllocHookSize(Size
);
277 verifyDeallocHookPtr(P
);
282 TEST_F(ScudoWrappersCTest
, AlignedAlloc
) {
283 const size_t Alignment
= 4096U;
284 void *P
= aligned_alloc(Alignment
, Alignment
* 4U);
285 EXPECT_NE(P
, nullptr);
286 EXPECT_LE(Alignment
* 4U, malloc_usable_size(P
));
287 EXPECT_EQ(reinterpret_cast<uintptr_t>(P
) % Alignment
, 0U);
288 verifyAllocHookPtr(P
);
289 verifyAllocHookSize(Alignment
* 4U);
291 verifyDeallocHookPtr(P
);
294 P
= aligned_alloc(Alignment
, Size
);
295 EXPECT_EQ(P
, nullptr);
296 EXPECT_EQ(errno
, EINVAL
);
299 TEST_F(ScudoWrappersCDeathTest
, Realloc
) {
300 invalidateHookPtrs();
301 // realloc(nullptr, N) is malloc(N)
302 void *P
= realloc(nullptr, Size
);
303 EXPECT_NE(P
, nullptr);
304 verifyAllocHookPtr(P
);
305 verifyAllocHookSize(Size
);
307 verifyDeallocHookPtr(P
);
309 invalidateHookPtrs();
311 EXPECT_NE(P
, nullptr);
312 // realloc(P, 0U) is free(P) and returns nullptr
313 EXPECT_EQ(realloc(P
, 0U), nullptr);
314 verifyDeallocHookPtr(P
);
317 EXPECT_NE(P
, nullptr);
318 EXPECT_LE(Size
, malloc_usable_size(P
));
319 memset(P
, 0x42, Size
);
321 invalidateHookPtrs();
323 P
= realloc(P
, Size
* 2U);
324 EXPECT_NE(P
, nullptr);
325 EXPECT_LE(Size
* 2U, malloc_usable_size(P
));
326 for (size_t I
= 0; I
< Size
; I
++)
327 EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P
))[I
]);
329 verifyDeallocHookPtr(OldP
);
330 verifyAllocHookPtr(OldP
);
332 verifyAllocHookPtr(P
);
333 verifyAllocHookSize(Size
* 2U);
334 verifyDeallocHookPtr(OldP
);
336 verifyReallocHookPtrs(OldP
, P
, Size
* 2U);
338 invalidateHookPtrs();
340 P
= realloc(P
, Size
/ 2U);
341 EXPECT_NE(P
, nullptr);
342 EXPECT_LE(Size
/ 2U, malloc_usable_size(P
));
343 for (size_t I
= 0; I
< Size
/ 2U; I
++)
344 EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P
))[I
]);
346 verifyDeallocHookPtr(OldP
);
347 verifyAllocHookPtr(OldP
);
349 verifyAllocHookPtr(P
);
350 verifyAllocHookSize(Size
/ 2U);
352 verifyReallocHookPtrs(OldP
, P
, Size
/ 2U);
355 EXPECT_DEATH(P
= realloc(P
, Size
), "");
358 EXPECT_EQ(realloc(nullptr, SIZE_MAX
), nullptr);
359 EXPECT_EQ(errno
, ENOMEM
);
361 EXPECT_NE(P
, nullptr);
363 EXPECT_EQ(realloc(P
, SIZE_MAX
), nullptr);
364 EXPECT_EQ(errno
, ENOMEM
);
367 // Android allows realloc of memalign pointers.
369 const size_t Alignment
= 1024U;
370 P
= memalign(Alignment
, Size
);
371 EXPECT_NE(P
, nullptr);
372 EXPECT_LE(Size
, malloc_usable_size(P
));
373 EXPECT_EQ(reinterpret_cast<uintptr_t>(P
) % Alignment
, 0U);
374 memset(P
, 0x42, Size
);
376 P
= realloc(P
, Size
* 2U);
377 EXPECT_NE(P
, nullptr);
378 EXPECT_LE(Size
* 2U, malloc_usable_size(P
));
379 for (size_t I
= 0; I
< Size
; I
++)
380 EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P
))[I
]);
386 TEST_F(ScudoWrappersCTest
, MallOpt
) {
388 EXPECT_EQ(mallopt(-1000, 1), 0);
389 // mallopt doesn't set errno.
392 EXPECT_EQ(mallopt(M_PURGE
, 0), 1);
394 EXPECT_EQ(mallopt(M_DECAY_TIME
, 1), 1);
395 EXPECT_EQ(mallopt(M_DECAY_TIME
, 0), 1);
396 EXPECT_EQ(mallopt(M_DECAY_TIME
, 1), 1);
397 EXPECT_EQ(mallopt(M_DECAY_TIME
, 0), 1);
400 EXPECT_EQ(mallopt(M_CACHE_COUNT_MAX
, 100), 1);
401 EXPECT_EQ(mallopt(M_CACHE_SIZE_MAX
, 1024 * 1024 * 2), 1);
402 EXPECT_EQ(mallopt(M_TSDS_COUNT_MAX
, 10), 1);
407 TEST_F(ScudoWrappersCTest
, OtherAlloc
) {
409 const size_t PageSize
= static_cast<size_t>(sysconf(_SC_PAGESIZE
));
411 void *P
= pvalloc(Size
);
412 EXPECT_NE(P
, nullptr);
413 EXPECT_EQ(reinterpret_cast<uintptr_t>(P
) & (PageSize
- 1), 0U);
414 EXPECT_LE(PageSize
, malloc_usable_size(P
));
415 verifyAllocHookPtr(P
);
416 // Size will be rounded up to PageSize.
417 verifyAllocHookSize(PageSize
);
419 verifyDeallocHookPtr(P
);
421 EXPECT_EQ(pvalloc(SIZE_MAX
), nullptr);
424 EXPECT_NE(P
, nullptr);
425 EXPECT_EQ(reinterpret_cast<uintptr_t>(P
) & (PageSize
- 1), 0U);
430 EXPECT_EQ(valloc(SIZE_MAX
), nullptr);
434 template<typename FieldType
>
435 void MallInfoTest() {
436 // mallinfo is deprecated.
437 #pragma clang diagnostic push
438 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
439 const FieldType BypassQuarantineSize
= 1024U;
440 struct mallinfo MI
= mallinfo();
441 FieldType Allocated
= MI
.uordblks
;
442 void *P
= malloc(BypassQuarantineSize
);
443 EXPECT_NE(P
, nullptr);
445 EXPECT_GE(MI
.uordblks
, Allocated
+ BypassQuarantineSize
);
446 EXPECT_GT(MI
.hblkhd
, static_cast<FieldType
>(0));
447 FieldType Free
= MI
.fordblks
;
450 EXPECT_GE(MI
.fordblks
, Free
+ BypassQuarantineSize
);
451 #pragma clang diagnostic pop
455 TEST_F(ScudoWrappersCTest
, MallInfo
) {
457 // Android accidentally set the fields to size_t instead of int.
458 MallInfoTest
<size_t>();
465 #if __GLIBC_PREREQ(2, 33) || SCUDO_ANDROID
466 TEST_F(ScudoWrappersCTest
, MallInfo2
) {
467 const size_t BypassQuarantineSize
= 1024U;
468 struct mallinfo2 MI
= mallinfo2();
469 size_t Allocated
= MI
.uordblks
;
470 void *P
= malloc(BypassQuarantineSize
);
471 EXPECT_NE(P
, nullptr);
473 EXPECT_GE(MI
.uordblks
, Allocated
+ BypassQuarantineSize
);
474 EXPECT_GT(MI
.hblkhd
, 0U);
475 size_t Free
= MI
.fordblks
;
478 EXPECT_GE(MI
.fordblks
, Free
+ BypassQuarantineSize
);
482 static uintptr_t BoundaryP
;
485 static void callback(uintptr_t Base
, UNUSED
size_t Size
, UNUSED
void *Arg
) {
486 if (scudo::archSupportsMemoryTagging()) {
487 Base
= scudo::untagPointer(Base
);
488 BoundaryP
= scudo::untagPointer(BoundaryP
);
490 if (Base
== BoundaryP
)
494 // Verify that a block located on an iteration boundary is not mis-accounted.
495 // To achieve this, we allocate a chunk for which the backing block will be
496 // aligned on a page, then run the malloc_iterate on both the pages that the
497 // block is a boundary for. It must only be seen once by the callback function.
498 TEST_F(ScudoWrappersCTest
, MallocIterateBoundary
) {
499 const size_t PageSize
= static_cast<size_t>(sysconf(_SC_PAGESIZE
));
501 // Android uses a 16 byte alignment for both 32 bit and 64 bit.
502 const size_t BlockDelta
= 16U;
504 const size_t BlockDelta
= FIRST_32_SECOND_64(8U, 16U);
506 const size_t SpecialSize
= PageSize
- BlockDelta
;
508 // We aren't guaranteed that any size class is exactly a page wide. So we need
509 // to keep making allocations until we get an allocation that starts exactly
510 // on a page boundary. The BlockDelta value is expected to be the number of
511 // bytes to subtract from a returned pointer to get to the actual start of
512 // the pointer in the size class. In practice, this means BlockDelta should
513 // be set to the minimum alignment in bytes for the allocation.
515 // With a 16-byte block alignment and 4096-byte page size, each allocation has
516 // a probability of (1 - (16/4096)) of failing to meet the alignment
517 // requirements, and the probability of failing 65536 times is
518 // (1 - (16/4096))^65536 < 10^-112. So if we still haven't succeeded after
519 // 65536 tries, give up.
522 for (unsigned I
= 0; I
!= 65536; ++I
) {
524 P
= malloc(SpecialSize
);
525 EXPECT_NE(P
, nullptr);
526 *reinterpret_cast<void **>(P
) = PrevP
;
527 BoundaryP
= reinterpret_cast<uintptr_t>(P
);
528 Block
= BoundaryP
- BlockDelta
;
529 if ((Block
& (PageSize
- 1)) == 0U)
532 EXPECT_EQ((Block
& (PageSize
- 1)), 0U);
536 malloc_iterate(Block
- PageSize
, PageSize
, callback
, nullptr);
537 malloc_iterate(Block
, PageSize
, callback
, nullptr);
539 EXPECT_EQ(Count
, 1U);
542 void *NextP
= *reinterpret_cast<void **>(P
);
548 // Fuchsia doesn't have alarm, fork or malloc_info.
550 TEST_F(ScudoWrappersCDeathTest
, MallocDisableDeadlock
) {
551 // We expect heap operations within a disable/enable scope to deadlock.
554 void *P
= malloc(Size
);
555 EXPECT_NE(P
, nullptr);
565 TEST_F(ScudoWrappersCTest
, MallocInfo
) {
566 // Use volatile so that the allocations don't get optimized away.
567 void *volatile P1
= malloc(1234);
568 void *volatile P2
= malloc(4321);
571 FILE *F
= fmemopen(Buffer
, sizeof(Buffer
), "w+");
572 EXPECT_NE(F
, nullptr);
574 EXPECT_EQ(malloc_info(0, F
), 0);
577 EXPECT_EQ(strncmp(Buffer
, "<malloc version=\"scudo-", 23), 0);
578 EXPECT_NE(nullptr, strstr(Buffer
, "<alloc size=\"1234\" count=\""));
579 EXPECT_NE(nullptr, strstr(Buffer
, "<alloc size=\"4321\" count=\""));
585 TEST_F(ScudoWrappersCDeathTest
, Fork
) {
588 EXPECT_GE(Pid
, 0) << strerror(errno
);
591 EXPECT_NE(P
, nullptr);
592 memset(P
, 0x42, Size
);
596 waitpid(Pid
, nullptr, 0);
598 EXPECT_NE(P
, nullptr);
599 memset(P
, 0x42, Size
);
602 // fork should stall if the allocator has been disabled.
// Synchronization between the main thread and enableMalloc below.
// NOTE(review): `Ready` is required by the signal/wait handshake in the two
// functions below; its declaration was lost in this copy.
static pthread_mutex_t Mutex;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
static bool Ready;
617 static void *enableMalloc(UNUSED
void *Unused
) {
618 // Initialize the allocator for this thread.
619 void *P
= malloc(Size
);
620 EXPECT_NE(P
, nullptr);
621 memset(P
, 0x42, Size
);
624 // Signal the main thread we are ready.
625 pthread_mutex_lock(&Mutex
);
627 pthread_cond_signal(&Conditional
);
628 pthread_mutex_unlock(&Mutex
);
630 // Wait for the malloc_disable & fork, then enable the allocator again.
637 TEST_F(ScudoWrappersCTest
, DisableForkEnable
) {
640 EXPECT_EQ(pthread_create(&ThreadId
, nullptr, &enableMalloc
, nullptr), 0);
642 // Wait for the thread to be warmed up.
643 pthread_mutex_lock(&Mutex
);
645 pthread_cond_wait(&Conditional
, &Mutex
);
646 pthread_mutex_unlock(&Mutex
);
648 // Disable the allocator and fork. fork should succeed after malloc_enable.
653 void *P
= malloc(Size
);
654 EXPECT_NE(P
, nullptr);
655 memset(P
, 0x42, Size
);
659 waitpid(Pid
, nullptr, 0);
660 EXPECT_EQ(pthread_join(ThreadId
, 0), 0);
663 #endif // SCUDO_FUCHSIA