//===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "common.h"
#include "memtag.h"
#include "scudo/interface.h"
#include "tests/scudo_unit_test.h"

#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <vector>
#ifndef __GLIBC_PREREQ
#define __GLIBC_PREREQ(x, y) 0
#endif
#if SCUDO_FUCHSIA
// Fuchsia only has valloc.
#define HAVE_VALLOC 1
#elif SCUDO_ANDROID
// Android only has pvalloc/valloc on 32 bit.
#if !defined(__LP64__)
#define HAVE_PVALLOC 1
#define HAVE_VALLOC 1
#endif // !defined(__LP64__)
#else
// All others are assumed to support both functions.
#define HAVE_PVALLOC 1
#define HAVE_VALLOC 1
#endif
extern "C" {
void malloc_enable(void);
void malloc_disable(void);
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);
}
#ifndef SCUDO_ENABLE_HOOKS_TESTS
#define SCUDO_ENABLE_HOOKS_TESTS 0
#endif

#if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
#error "Hooks tests should have hooks enabled as well!"
#endif
// Record the most recent allocation/deallocation seen by the hooks so that
// the tests below can verify that the hooks fired with the expected values.
struct AllocContext {
  void *Ptr;
  size_t Size;
};
struct DeallocContext {
  void *Ptr;
};
static AllocContext AC;
static DeallocContext DC;

#if (SCUDO_ENABLE_HOOKS_TESTS == 1)
__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
                                                                  size_t Size) {
  AC.Ptr = Ptr;
  AC.Size = Size;
}
__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
  DC.Ptr = Ptr;
}
#endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
class ScudoWrappersCTest : public Test {
protected:
  void SetUp() override {
    if (SCUDO_ENABLE_HOOKS && !SCUDO_ENABLE_HOOKS_TESTS)
      printf("Hooks are enabled but hooks tests are disabled.\n");
  }

  void invalidateAllocHookPtrAs(UNUSED void *Ptr) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      AC.Ptr = Ptr;
  }
  void verifyAllocHookPtr(UNUSED void *Ptr) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Ptr, AC.Ptr);
  }
  void verifyAllocHookSize(UNUSED size_t Size) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Size, AC.Size);
  }
  void verifyDeallocHookPtr(UNUSED void *Ptr) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Ptr, DC.Ptr);
  }
};
using ScudoWrappersCDeathTest = ScudoWrappersCTest;
// Note that every C allocation function in the test binary will be fulfilled
// by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
// But this might also lead to unexpected side-effects, since the allocation and
// deallocation operations in the TEST functions will coexist with others (see
// the EXPECT_DEATH comment below).

// We have to use a small quarantine to make sure that our double-free tests
// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was just
// freed (this depends on the size obviously) and the following free succeeds.
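// A minimal sketch of how such a small quarantine can be configured for the
// test binary: Scudo queries __scudo_default_options() (declared in
// scudo/interface.h) at startup. The specific option values below are
// illustrative assumptions, not values mandated by the tests.
extern "C" __attribute__((visibility("default"))) const char *
__scudo_default_options(void) {
  return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
         "quarantine_max_chunk_size=512";
}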
static const size_t Size = 100U;
TEST_F(ScudoWrappersCDeathTest, Malloc) {
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Size);

  // An update to this warning in Clang now triggers in this line, but it's ok
  // because the check is expecting a bad pointer and should fail.
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
#endif
  EXPECT_DEATH(
      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic pop
#endif

  free(P);
  verifyDeallocHookPtr(P);
  EXPECT_DEATH(free(P), "");

  P = malloc(0U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}
TEST_F(ScudoWrappersCTest, Calloc) {
  void *P = calloc(1U, Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Size);
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
  free(P);
  verifyDeallocHookPtr(P);

  P = calloc(1U, 0U);
  EXPECT_NE(P, nullptr);
  free(P);
  P = calloc(0U, 1U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}
TEST_F(ScudoWrappersCTest, SmallAlign) {
  // Allocate pointers with sizes and alignments that are powers of 2, from 1
  // up to 0x10000. Powers of 2 are used because memalign expects power-of-2
  // alignments, and they cover a wide range of sizes.
  constexpr size_t MaxSize = 0x10000;
  std::vector<void *> ptrs;
  // Reserve space up front to prevent further allocations during the test.
  ptrs.reserve((scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) *
               (scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) * 3);
  for (size_t Size = 1; Size <= MaxSize; Size <<= 1) {
    for (size_t Align = 1; Align <= MaxSize; Align <<= 1) {
      for (size_t Count = 0; Count < 3; ++Count) {
        void *P = memalign(Align, Size);
        EXPECT_TRUE(reinterpret_cast<uintptr_t>(P) % Align == 0);
        ptrs.push_back(P);
      }
    }
  }
  for (void *ptr : ptrs)
    free(ptr);
}
TEST_F(ScudoWrappersCTest, Memalign) {
  void *P;
  for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
    const size_t Alignment = 1U << I;

    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size);
    free(P);
    verifyDeallocHookPtr(P);

    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size);
    free(P);
    verifyDeallocHookPtr(P);
  }

  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);

  // Android's memalign accepts non power-of-2 alignments, and 0.
  if (SCUDO_ANDROID) {
    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
      P = memalign(Alignment, 1024U);
      EXPECT_NE(P, nullptr);
      verifyAllocHookPtr(P);
      verifyAllocHookSize(1024U);
      free(P);
      verifyDeallocHookPtr(P);
    }
  }
}
TEST_F(ScudoWrappersCTest, AlignedAlloc) {
  const size_t Alignment = 4096U;
  void *P = aligned_alloc(Alignment, Alignment * 4U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Alignment * 4U);
  free(P);
  verifyDeallocHookPtr(P);

  errno = 0;
  P = aligned_alloc(Alignment, Size);
  EXPECT_EQ(P, nullptr);
  EXPECT_EQ(errno, EINVAL);
}
TEST_F(ScudoWrappersCDeathTest, Realloc) {
  // realloc(nullptr, N) is malloc(N).
  void *P = realloc(nullptr, Size);
  EXPECT_NE(P, nullptr);
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Size);
  free(P);
  verifyDeallocHookPtr(P);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  // realloc(P, 0U) is free(P) and returns nullptr.
  EXPECT_EQ(realloc(P, 0U), nullptr);
  verifyDeallocHookPtr(P);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  memset(P, 0x42, Size);

  invalidateAllocHookPtrAs(reinterpret_cast<void *>(0xdeadbeef));
  void *OldP = P;
  P = realloc(P, Size * 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size * 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  if (OldP == P) {
    // The chunk was resized in place, so no hook fired.
    verifyAllocHookPtr(reinterpret_cast<void *>(0xdeadbeef));
  } else {
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size * 2U);
    verifyDeallocHookPtr(OldP);
  }

  invalidateAllocHookPtrAs(reinterpret_cast<void *>(0xdeadbeef));
  OldP = P;
  P = realloc(P, Size / 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size / 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size / 2U; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  if (OldP == P) {
    verifyAllocHookPtr(reinterpret_cast<void *>(0xdeadbeef));
  } else {
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size / 2U);
  }
  free(P);

  EXPECT_DEATH(P = realloc(P, Size), "");

  errno = 0;
  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  errno = 0;
  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  free(P);

  // Android allows realloc of memalign pointers.
  if (SCUDO_ANDROID) {
    const size_t Alignment = 1024U;
    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    memset(P, 0x42, Size);

    P = realloc(P, Size * 2U);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size * 2U, malloc_usable_size(P));
    for (size_t I = 0; I < Size; I++)
      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
    free(P);
  }
}
TEST_F(ScudoWrappersCTest, MallOpt) {
  errno = 0;
  EXPECT_EQ(mallopt(-1000, 1), 0);
  // mallopt doesn't set errno.
  EXPECT_EQ(errno, 0);

  EXPECT_EQ(mallopt(M_PURGE, 0), 1);

  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);

#if SCUDO_ANDROID
  EXPECT_EQ(mallopt(M_CACHE_COUNT_MAX, 100), 1);
  EXPECT_EQ(mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2), 1);
  EXPECT_EQ(mallopt(M_TSDS_COUNT_MAX, 10), 1);
#endif
}
TEST_F(ScudoWrappersCTest, OtherAlloc) {
#if HAVE_PVALLOC
  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));

  void *P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  EXPECT_LE(PageSize, malloc_usable_size(P));
  verifyAllocHookPtr(P);
  // Size will be rounded up to PageSize.
  verifyAllocHookSize(PageSize);
  free(P);
  verifyDeallocHookPtr(P);

  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);

  P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  free(P);
#endif // HAVE_PVALLOC

#if HAVE_VALLOC
  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
#endif // HAVE_VALLOC
}
template <typename FieldType> void MallInfoTest() {
  // mallinfo is deprecated.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
  const FieldType BypassQuarantineSize = 1024U;
  struct mallinfo MI = mallinfo();
  FieldType Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo();
  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
  EXPECT_GT(MI.hblkhd, static_cast<FieldType>(0));
  FieldType Free = MI.fordblks;
  free(P);
  MI = mallinfo();
  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
#pragma clang diagnostic pop
}
TEST_F(ScudoWrappersCTest, MallInfo) {
#if SCUDO_ANDROID
  // Android accidentally set the fields to size_t instead of int.
  MallInfoTest<size_t>();
#else
  MallInfoTest<int>();
#endif
}
#if __GLIBC_PREREQ(2, 33) || SCUDO_ANDROID
TEST_F(ScudoWrappersCTest, MallInfo2) {
  const size_t BypassQuarantineSize = 1024U;
  struct mallinfo2 MI = mallinfo2();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo2();
  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
  EXPECT_GT(MI.hblkhd, 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo2();
  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
}
#endif
static uintptr_t BoundaryP;
static size_t Count;

static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) {
  if (scudo::archSupportsMemoryTagging()) {
    Base = scudo::untagPointer(Base);
    BoundaryP = scudo::untagPointer(BoundaryP);
  }
  if (Base == BoundaryP)
    Count++;
}
// Verify that a block located on an iteration boundary is not mis-accounted.
// To achieve this, we allocate a chunk for which the backing block will be
// aligned on a page, then run the malloc_iterate on both the pages that the
// block is a boundary for. It must only be seen once by the callback function.
TEST_F(ScudoWrappersCTest, MallocIterateBoundary) {
  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
#if SCUDO_ANDROID
  // Android uses a 16 byte alignment for both 32 bit and 64 bit.
  const size_t BlockDelta = 16U;
#else
  const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
#endif
  const size_t SpecialSize = PageSize - BlockDelta;

  // We aren't guaranteed that any size class is exactly a page wide. So we need
  // to keep making allocations until we get an allocation that starts exactly
  // on a page boundary. The BlockDelta value is expected to be the number of
  // bytes to subtract from a returned pointer to get to the actual start of
  // the pointer in the size class. In practice, this means BlockDelta should
  // be set to the minimum alignment in bytes for the allocation.
  //
  // With a 16-byte block alignment and 4096-byte page size, each allocation has
  // a probability of (1 - (16/4096)) of failing to meet the alignment
  // requirements, and the probability of failing 65536 times is
  // (1 - (16/4096))^65536 = ((255/256)^256)^256 ~= e^-256 < 10^-111. So if we
  // still haven't succeeded after 65536 tries, give up.
  uintptr_t Block = 0U;
  void *P = nullptr;
  for (unsigned I = 0; I != 65536; ++I) {
    void *PrevP = P;
    P = malloc(SpecialSize);
    EXPECT_NE(P, nullptr);
    // Chain the allocations so they can all be freed at the end of the test.
    *reinterpret_cast<void **>(P) = PrevP;
    BoundaryP = reinterpret_cast<uintptr_t>(P);
    Block = BoundaryP - BlockDelta;
    if ((Block & (PageSize - 1)) == 0U)
      break;
  }
  EXPECT_EQ((Block & (PageSize - 1)), 0U);

  Count = 0U;
  malloc_disable();
  malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
  malloc_iterate(Block, PageSize, callback, nullptr);
  malloc_enable();
  EXPECT_EQ(Count, 1U);

  while (P) {
    void *NextP = *reinterpret_cast<void **>(P);
    free(P);
    P = NextP;
  }
}
// Fuchsia doesn't have alarm, fork or malloc_info.
#if !SCUDO_FUCHSIA
TEST_F(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
  // We expect heap operations within a disable/enable scope to deadlock.
  // alarm() turns the deadlock into a crash that EXPECT_DEATH can observe.
  EXPECT_DEATH(
      {
        void *P = malloc(Size);
        EXPECT_NE(P, nullptr);
        free(P);
        malloc_disable();
        alarm(1);
        free(P);
        malloc_enable();
      },
      "");
}
TEST_F(ScudoWrappersCTest, MallocInfo) {
  // Use volatile so that the allocations don't get optimized away.
  void *volatile P1 = malloc(1234);
  void *volatile P2 = malloc(4321);

  char Buffer[16384];
  FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
  EXPECT_NE(F, nullptr);
  errno = 0;
  EXPECT_EQ(malloc_info(0, F), 0);
  EXPECT_EQ(errno, 0);
  fclose(F);
  EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));

  free(P1);
  free(P2);
}
TEST_F(ScudoWrappersCDeathTest, Fork) {
  void *P;
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0) << strerror(errno);
  if (Pid == 0) {
    // Child: the allocator must remain functional after the fork.
    P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  // Parent: the allocator must also remain functional.
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // fork should stall if the allocator has been disabled.
  EXPECT_DEATH(
      {
        malloc_disable();
        alarm(1);
        Pid = fork();
        EXPECT_GE(Pid, 0);
      },
      "");
}
static pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
static bool Ready;

static void *enableMalloc(UNUSED void *Unused) {
  // Initialize the allocator for this thread.
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // Signal the main thread we are ready.
  pthread_mutex_lock(&Mutex);
  Ready = true;
  pthread_cond_signal(&Conditional);
  pthread_mutex_unlock(&Mutex);

  // Wait for the malloc_disable & fork, then enable the allocator again.
  sleep(1);
  malloc_enable();

  return nullptr;
}
TEST_F(ScudoWrappersCTest, DisableForkEnable) {
  pthread_t ThreadId;
  Ready = false;
  EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);

  // Wait for the thread to be warmed up.
  pthread_mutex_lock(&Mutex);
  while (!Ready)
    pthread_cond_wait(&Conditional, &Mutex);
  pthread_mutex_unlock(&Mutex);

  // Disable the allocator and fork. fork should succeed after malloc_enable.
  malloc_disable();
  int Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    void *P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  EXPECT_EQ(pthread_join(ThreadId, 0), 0);
}

#endif // SCUDO_FUCHSIA