//===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "common.h"
#include "memtag.h"
#include "scudo/interface.h"
#include "tests/scudo_unit_test.h"

// Standard headers for the libc entry points, string/memory helpers,
// threading primitives, and process control used throughout these tests.
#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

#include <vector>

#ifndef __GLIBC_PREREQ
#define __GLIBC_PREREQ(x, y) 0
#endif

#if SCUDO_FUCHSIA
// Fuchsia only has valloc.
#define HAVE_VALLOC 1
#elif SCUDO_ANDROID
// Android only has pvalloc/valloc on 32 bit.
#if !defined(__LP64__)
#define HAVE_PVALLOC 1
#define HAVE_VALLOC 1
#endif // !defined(__LP64__)
#else
// All others are assumed to support both functions.
#define HAVE_PVALLOC 1
#define HAVE_VALLOC 1
#endif

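// The routines declared below are not provided by the standard headers on
// every platform; malloc_iterate, malloc_disable and malloc_enable in
// particular are Android/Bionic extensions that Scudo implements, so they
// are declared manually here.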
extern "C" {
void malloc_enable(void);
void malloc_disable(void);
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);
}

#ifndef SCUDO_ENABLE_HOOKS_TESTS
#define SCUDO_ENABLE_HOOKS_TESTS 0
#endif

#if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
#error "Hooks tests should have hooks enabled as well!"
#endif

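// Each context below records the arguments of the most recent invocation of
// the corresponding allocation hook, so that the tests can verify which
// pointer and size the hooks observed.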
struct AllocContext {
  void *Ptr;
  size_t Size;
};
struct DeallocContext {
  void *Ptr;
};
struct ReallocContext {
  void *AllocPtr;
  void *DeallocPtr;
  size_t Size;
};
static AllocContext AC;
static DeallocContext DC;
static ReallocContext RC;

#if (SCUDO_ENABLE_HOOKS_TESTS == 1)
__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
                                                                  size_t Size) {
  AC.Ptr = Ptr;
  AC.Size = Size;
}
__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
  DC.Ptr = Ptr;
}
__attribute__((visibility("default"))) void
__scudo_realloc_allocate_hook(void *OldPtr, void *NewPtr, size_t Size) {
  // Verify that __scudo_realloc_deallocate_hook is called first and sets the
  // right pointer.
  EXPECT_EQ(OldPtr, RC.DeallocPtr);
  RC.AllocPtr = NewPtr;
  RC.Size = Size;

  // Note that this is only used for testing. In general, only one pair of
  // hooks will be invoked in `realloc`. If the __scudo_realloc_*_hook
  // functions are not defined, only the general hooks are called. To make the
  // test easier, we call the general one here so that either case (whether or
  // not __scudo_realloc_*_hook are defined) is verified without separating
  // them into different tests.
  __scudo_allocate_hook(NewPtr, Size);
}
__attribute__((visibility("default"))) void
__scudo_realloc_deallocate_hook(void *Ptr) {
  RC.DeallocPtr = Ptr;

  // See the comment in __scudo_realloc_allocate_hook above.
  __scudo_deallocate_hook(Ptr);
}
#endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
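// To summarize the sequence verified above: for `realloc(OldPtr, Size)`
// returning NewPtr, Scudo invokes
//   __scudo_realloc_deallocate_hook(OldPtr);
//   __scudo_realloc_allocate_hook(OldPtr, NewPtr, Size);
// and falls back to the general __scudo_deallocate_hook/__scudo_allocate_hook
// pair when the realloc-specific hooks are not defined.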
class ScudoWrappersCTest : public Test {
protected:
  void SetUp() override {
    if (SCUDO_ENABLE_HOOKS && !SCUDO_ENABLE_HOOKS_TESTS)
      printf("Hooks are enabled but hooks tests are disabled.\n");
  }

  void invalidateHookPtrs() {
    if (SCUDO_ENABLE_HOOKS_TESTS) {
      void *InvalidPtr = reinterpret_cast<void *>(0xdeadbeef);
      AC.Ptr = InvalidPtr;
      DC.Ptr = InvalidPtr;
      RC.AllocPtr = RC.DeallocPtr = InvalidPtr;
    }
  }
  void verifyAllocHookPtr(UNUSED void *Ptr) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Ptr, AC.Ptr);
  }
  void verifyAllocHookSize(UNUSED size_t Size) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Size, AC.Size);
  }
  void verifyDeallocHookPtr(UNUSED void *Ptr) {
    if (SCUDO_ENABLE_HOOKS_TESTS)
      EXPECT_EQ(Ptr, DC.Ptr);
  }
  void verifyReallocHookPtrs(UNUSED void *OldPtr, void *NewPtr, size_t Size) {
    if (SCUDO_ENABLE_HOOKS_TESTS) {
      EXPECT_EQ(OldPtr, RC.DeallocPtr);
      EXPECT_EQ(NewPtr, RC.AllocPtr);
      EXPECT_EQ(Size, RC.Size);
    }
  }
};

using ScudoWrappersCDeathTest = ScudoWrappersCTest;

// Note that every C allocation function in the test binary will be fulfilled
// by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
// But this might also lead to unexpected side-effects, since the allocation
// and deallocation operations in the TEST functions will coexist with others
// (see the EXPECT_DEATH comment below).

// We have to use a small quarantine to make sure that our double-free tests
// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was
// just freed (this obviously depends on the size), and the subsequent free
// succeeds.
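//
// A sketch of one way to obtain such a configuration (illustrative only, not
// necessarily the mechanism this harness uses): the Scudo interface lets a
// test binary provide default options, e.g.
//   extern "C" const char *__scudo_default_options() {
//     return "quarantine_size_kb=256:quarantine_max_chunk_size=2048";
//   }
// The option values above are hypothetical.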
static const size_t Size = 100U;

TEST_F(ScudoWrappersCDeathTest, Malloc) {
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Size);

  // An update to this warning in Clang now triggers on this line, but that is
  // expected: the call deliberately passes a bad pointer and should die.
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
#endif
  EXPECT_DEATH(
      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic pop
#endif

  free(P);
  verifyDeallocHookPtr(P);
  EXPECT_DEATH(free(P), "");

  P = malloc(0U);
  EXPECT_NE(P, nullptr);
  free(P);

  errno = 0;
  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST_F(ScudoWrappersCTest, Calloc) {
  void *P = calloc(1U, Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Size);
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
  free(P);
  verifyDeallocHookPtr(P);

  P = calloc(1U, 0U);
  EXPECT_NE(P, nullptr);
  free(P);
  P = calloc(0U, 1U);
  EXPECT_NE(P, nullptr);
  free(P);

  // The requests below either overflow the size computation or exceed the
  // maximum allocation size; calloc must fail with ENOMEM rather than
  // allocate a truncated size.
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
  if (SCUDO_ANDROID)
    EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}

TEST_F(ScudoWrappersCTest, SmallAlign) {
  // Allocate with sizes and alignments that are powers of 2 from 1 to
  // 0x10000. Powers of 2 are used because memalign generally expects
  // power-of-2 alignments, and they cover a wide range of sizes.
  constexpr size_t MaxSize = 0x10000;
  std::vector<void *> ptrs;
  // Reserve space up front so that push_back does not allocate during the
  // test. getLeastSignificantSetBitIndex(MaxSize) is 16, so there are 17 size
  // values, 17 alignments, and 3 allocations for each combination.
  ptrs.reserve((scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) *
               (scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) * 3);
  for (size_t Size = 1; Size <= MaxSize; Size <<= 1) {
    for (size_t Align = 1; Align <= MaxSize; Align <<= 1) {
      for (size_t Count = 0; Count < 3; ++Count) {
        void *P = memalign(Align, Size);
        EXPECT_TRUE(reinterpret_cast<uintptr_t>(P) % Align == 0);
        ptrs.push_back(P);
      }
    }
  }
  for (void *ptr : ptrs)
    free(ptr);
}

TEST_F(ScudoWrappersCTest, Memalign) {
  void *P;
  for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
    const size_t Alignment = 1U << I;

    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size);
    free(P);
    verifyDeallocHookPtr(P);

    P = nullptr;
    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size);
    free(P);
    verifyDeallocHookPtr(P);
  }

  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);

  // Android's memalign accepts non-power-of-2 alignments, and 0.
  if (SCUDO_ANDROID) {
    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
      P = memalign(Alignment, 1024U);
      EXPECT_NE(P, nullptr);
      verifyAllocHookPtr(P);
      // The hook should have observed the requested size of 1024 bytes.
      verifyAllocHookSize(1024U);
      free(P);
      verifyDeallocHookPtr(P);
    }
  }
}

TEST_F(ScudoWrappersCTest, AlignedAlloc) {
  const size_t Alignment = 4096U;
  void *P = aligned_alloc(Alignment, Alignment * 4U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Alignment * 4U);
  free(P);
  verifyDeallocHookPtr(P);

  // Size is not a multiple of Alignment, which aligned_alloc must reject.
  errno = 0;
  P = aligned_alloc(Alignment, Size);
  EXPECT_EQ(P, nullptr);
  EXPECT_EQ(errno, EINVAL);
}

TEST_F(ScudoWrappersCDeathTest, Realloc) {
  invalidateHookPtrs();
  // realloc(nullptr, N) is malloc(N).
  void *P = realloc(nullptr, Size);
  EXPECT_NE(P, nullptr);
  verifyAllocHookPtr(P);
  verifyAllocHookSize(Size);
  free(P);
  verifyDeallocHookPtr(P);

  invalidateHookPtrs();
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  // realloc(P, 0U) is free(P) and returns nullptr.
  EXPECT_EQ(realloc(P, 0U), nullptr);
  verifyDeallocHookPtr(P);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  memset(P, 0x42, Size);

  invalidateHookPtrs();
  void *OldP = P;
  P = realloc(P, Size * 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size * 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  if (OldP == P) {
    verifyDeallocHookPtr(OldP);
    verifyAllocHookPtr(OldP);
  } else {
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size * 2U);
    verifyDeallocHookPtr(OldP);
  }
  verifyReallocHookPtrs(OldP, P, Size * 2U);

  invalidateHookPtrs();
  OldP = P;
  P = realloc(P, Size / 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size / 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size / 2U; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  if (OldP == P) {
    verifyDeallocHookPtr(OldP);
    verifyAllocHookPtr(OldP);
  } else {
    verifyAllocHookPtr(P);
    verifyAllocHookSize(Size / 2U);
  }
  verifyReallocHookPtrs(OldP, P, Size / 2U);
  free(P);

  EXPECT_DEATH(P = realloc(P, Size), "");

  errno = 0;
  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  errno = 0;
  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  free(P);

  // Android allows realloc of memalign pointers.
  if (SCUDO_ANDROID) {
    const size_t Alignment = 1024U;
    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    memset(P, 0x42, Size);

    P = realloc(P, Size * 2U);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size * 2U, malloc_usable_size(P));
    for (size_t I = 0; I < Size; I++)
      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
    free(P);
  }
}

#if !SCUDO_FUCHSIA
TEST_F(ScudoWrappersCTest, MallOpt) {
  errno = 0;
  EXPECT_EQ(mallopt(-1000, 1), 0);
  // mallopt doesn't set errno.
  EXPECT_EQ(errno, 0);

  EXPECT_EQ(mallopt(M_PURGE, 0), 1);

  // Toggle the decay time in both directions, twice.
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);

  if (SCUDO_ANDROID) {
    EXPECT_EQ(mallopt(M_CACHE_COUNT_MAX, 100), 1);
    EXPECT_EQ(mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2), 1);
    EXPECT_EQ(mallopt(M_TSDS_COUNT_MAX, 10), 1);
  }
}
#endif

TEST_F(ScudoWrappersCTest, OtherAlloc) {
#if HAVE_PVALLOC
  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));

  void *P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  EXPECT_LE(PageSize, malloc_usable_size(P));
  verifyAllocHookPtr(P);
  // The size is rounded up to a multiple of PageSize.
  verifyAllocHookSize(PageSize);
  free(P);
  verifyDeallocHookPtr(P);

  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);

  P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  free(P);
#endif

#if HAVE_VALLOC
  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
#endif
}

template <typename FieldType> void MallInfoTest() {
  // mallinfo is deprecated.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
  // The allocation is sized to bypass the quarantine, so that the freed block
  // immediately shows up in the free statistics.
  const FieldType BypassQuarantineSize = 1024U;
  struct mallinfo MI = mallinfo();
  FieldType Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo();
  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
  EXPECT_GT(MI.hblkhd, static_cast<FieldType>(0));
  FieldType Free = MI.fordblks;
  free(P);
  MI = mallinfo();
  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
#pragma clang diagnostic pop
}

#if !SCUDO_FUCHSIA
TEST_F(ScudoWrappersCTest, MallInfo) {
#if SCUDO_ANDROID
  // Android accidentally set the fields to size_t instead of int.
  MallInfoTest<size_t>();
#else
  MallInfoTest<int>();
#endif
}
#endif

#if __GLIBC_PREREQ(2, 33) || SCUDO_ANDROID
TEST_F(ScudoWrappersCTest, MallInfo2) {
  const size_t BypassQuarantineSize = 1024U;
  struct mallinfo2 MI = mallinfo2();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo2();
  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
  EXPECT_GT(MI.hblkhd, 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo2();
  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
}
#endif

static uintptr_t BoundaryP;
static size_t Count;

static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) {
  if (scudo::archSupportsMemoryTagging()) {
    Base = scudo::untagPointer(Base);
    BoundaryP = scudo::untagPointer(BoundaryP);
  }
  if (Base == BoundaryP)
    Count++;
}

// Verify that a block located on an iteration boundary is not mis-accounted.
// To achieve this, we allocate a chunk for which the backing block will be
// aligned on a page, then run malloc_iterate on each of the two pages that
// the block borders. The block must be seen exactly once by the callback.
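//
// Layout sketch (once the loop below finds a page-aligned block):
//
//   ... page N-1 ... | ... page N ...
//                    ^ Block starts exactly at the page boundary
//
// malloc_iterate is run once over page N-1 and once over page N; the block
// belongs only to page N, so Count must end up at exactly 1.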
TEST_F(ScudoWrappersCTest, MallocIterateBoundary) {
  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
#if SCUDO_ANDROID
  // Android uses a 16-byte alignment for both 32 bit and 64 bit.
  const size_t BlockDelta = 16U;
#else
  const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
#endif
  const size_t SpecialSize = PageSize - BlockDelta;

  // We aren't guaranteed that any size class is exactly a page wide. So we
  // need to keep making allocations until we get an allocation that starts
  // exactly on a page boundary. The BlockDelta value is expected to be the
  // number of bytes to subtract from a returned pointer to get to the actual
  // start of the block in the size class. In practice, this means BlockDelta
  // should be set to the minimum alignment in bytes for the allocation.
  //
  // With a 16-byte block alignment and 4096-byte page size, each allocation
  // has a probability of (1 - (16/4096)) of failing to meet the alignment
  // requirements, and the probability of failing 65536 times is
  // (1 - (16/4096))^65536 < 10^-111. So if we still haven't succeeded after
  // 65536 tries, give up.
  uintptr_t Block;
  void *P = nullptr;
  for (unsigned I = 0; I != 65536; ++I) {
    void *PrevP = P;
    P = malloc(SpecialSize);
    EXPECT_NE(P, nullptr);
    // Thread the allocations into an intrusive singly linked list so that
    // they can all be freed at the end of the test.
    *reinterpret_cast<void **>(P) = PrevP;
    BoundaryP = reinterpret_cast<uintptr_t>(P);
    Block = BoundaryP - BlockDelta;
    if ((Block & (PageSize - 1)) == 0U)
      break;
  }
  EXPECT_EQ((Block & (PageSize - 1)), 0U);

  Count = 0U;
  malloc_disable();
  malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
  malloc_iterate(Block, PageSize, callback, nullptr);
  malloc_enable();
  EXPECT_EQ(Count, 1U);

  while (P) {
    void *NextP = *reinterpret_cast<void **>(P);
    free(P);
    P = NextP;
  }
}

// Fuchsia doesn't have alarm, fork or malloc_info.
#if !SCUDO_FUCHSIA
TEST_F(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
  // We expect heap operations within a disable/enable scope to deadlock.
  EXPECT_DEATH(
      {
        void *P = malloc(Size);
        EXPECT_NE(P, nullptr);
        free(P);
        malloc_disable();
        alarm(1);
        P = malloc(Size);
        malloc_enable();
      },
      "");
}

TEST_F(ScudoWrappersCTest, MallocInfo) {
  // Use volatile so that the allocations don't get optimized away.
  void *volatile P1 = malloc(1234);
  void *volatile P2 = malloc(4321);

  char Buffer[16384];
  FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
  EXPECT_NE(F, nullptr);
  errno = 0;
  EXPECT_EQ(malloc_info(0, F), 0);
  EXPECT_EQ(errno, 0);
  fclose(F);
  EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));

  free(P1);
  free(P2);
}

TEST_F(ScudoWrappersCDeathTest, Fork) {
  void *P;
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0) << strerror(errno);
  if (Pid == 0) {
    // Verify that the child can allocate after the fork.
    P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // fork should stall if the allocator has been disabled.
  EXPECT_DEATH(
      {
        malloc_disable();
        alarm(1);
        Pid = fork();
        EXPECT_GE(Pid, 0);
      },
      "");
}

static pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
static bool Ready;

static void *enableMalloc(UNUSED void *Unused) {
  // Initialize the allocator for this thread.
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // Signal the main thread that we are ready.
  pthread_mutex_lock(&Mutex);
  Ready = true;
  pthread_cond_signal(&Conditional);
  pthread_mutex_unlock(&Mutex);

  // Wait for the malloc_disable & fork, then enable the allocator again.
  sleep(1);
  malloc_enable();

  return nullptr;
}

TEST_F(ScudoWrappersCTest, DisableForkEnable) {
  pthread_t ThreadId;
  Ready = false;
  EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);

  // Wait for the thread to be warmed up.
  pthread_mutex_lock(&Mutex);
  while (!Ready)
    pthread_cond_wait(&Conditional, &Mutex);
  pthread_mutex_unlock(&Mutex);

  // Disable the allocator and fork. fork should succeed after malloc_enable.
  malloc_disable();
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    void *P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  EXPECT_EQ(pthread_join(ThreadId, nullptr), 0);
}

#endif // !SCUDO_FUCHSIA