Run DCE after a LoopFlatten test to reduce spurious output [nfc]
[llvm-project.git] / compiler-rt / lib / scudo / standalone / tests / wrappers_c_test.cpp
blob623550535b6c8e270c87214ca37a940d55b03d66
1 //===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
9 #include "common.h"
10 #include "memtag.h"
11 #include "scudo/interface.h"
12 #include "tests/scudo_unit_test.h"
14 #include <errno.h>
15 #include <limits.h>
16 #include <malloc.h>
17 #include <stdlib.h>
18 #include <unistd.h>
19 #include <vector>
21 #ifndef __GLIBC_PREREQ
22 #define __GLIBC_PREREQ(x, y) 0
23 #endif
25 #if SCUDO_FUCHSIA
26 // Fuchsia only has valloc
27 #define HAVE_VALLOC 1
28 #elif SCUDO_ANDROID
29 // Android only has pvalloc/valloc on 32 bit
30 #if !defined(__LP64__)
31 #define HAVE_PVALLOC 1
32 #define HAVE_VALLOC 1
33 #endif // !defined(__LP64__)
34 #else
35 // All others assumed to support both functions.
36 #define HAVE_PVALLOC 1
37 #define HAVE_VALLOC 1
38 #endif
// Declarations of the non-standard allocator extension entry points under
// test; they are provided by the scudo C wrappers, not by any system header.
extern "C" {
void malloc_enable(void);
void malloc_disable(void);
// Invokes `callback` once per live allocation whose base lies in
// [base, base + size).
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);
}
49 #ifndef SCUDO_ENABLE_HOOKS_TESTS
50 #define SCUDO_ENABLE_HOOKS_TESTS 0
51 #endif
53 #if (SCUDO_ENABLE_HOOKS_TESTS == 1) && (SCUDO_ENABLE_HOOKS == 0)
54 #error "Hooks tests should have hooks enabled as well!"
55 #endif
// Records the arguments of the most recent allocate/deallocate hook
// invocation so the tests can verify hook delivery.
struct AllocContext {
  void *Ptr;
  size_t Size;
};
struct DeallocContext {
  void *Ptr;
};
static AllocContext AC;
static DeallocContext DC;
#if (SCUDO_ENABLE_HOOKS_TESTS == 1)
// Test-only hook implementations: stash the pointer/size of the latest
// (de)allocation into the static contexts above. `default` visibility is
// required so the allocator's weak references bind to these definitions.
__attribute__((visibility("default"))) void __scudo_allocate_hook(void *Ptr,
                                                                  size_t Size) {
  AC.Ptr = Ptr;
  AC.Size = Size;
}
__attribute__((visibility("default"))) void __scudo_deallocate_hook(void *Ptr) {
  DC.Ptr = Ptr;
}
#endif // (SCUDO_ENABLE_HOOKS_TESTS == 1)
79 class ScudoWrappersCTest : public Test {
80 protected:
81 void SetUp() override {
82 if (SCUDO_ENABLE_HOOKS && !SCUDO_ENABLE_HOOKS_TESTS)
83 printf("Hooks are enabled but hooks tests are disabled.\n");
86 void invalidateAllocHookPtrAs(UNUSED void *Ptr) {
87 if (SCUDO_ENABLE_HOOKS_TESTS)
88 AC.Ptr = Ptr;
90 void verifyAllocHookPtr(UNUSED void *Ptr) {
91 if (SCUDO_ENABLE_HOOKS_TESTS)
92 EXPECT_EQ(Ptr, AC.Ptr);
94 void verifyAllocHookSize(UNUSED size_t Size) {
95 if (SCUDO_ENABLE_HOOKS_TESTS)
96 EXPECT_EQ(Size, AC.Size);
98 void verifyDeallocHookPtr(UNUSED void *Ptr) {
99 if (SCUDO_ENABLE_HOOKS_TESTS)
100 EXPECT_EQ(Ptr, DC.Ptr);
103 using ScudoWrappersCDeathTest = ScudoWrappersCTest;
// Note that every C allocation function in the test binary will be fulfilled
// by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
// But this might also lead to unexpected side-effects, since the allocation and
// deallocation operations in the TEST functions will coexist with others (see
// the EXPECT_DEATH comment below).

// We have to use a small quarantine to make sure that our double-free tests
// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was just
// freed (this depends on the size obviously) and the following free succeeds.

// Default request size used by most tests below.
static const size_t Size = 100U;
117 TEST_F(ScudoWrappersCDeathTest, Malloc) {
118 void *P = malloc(Size);
119 EXPECT_NE(P, nullptr);
120 EXPECT_LE(Size, malloc_usable_size(P));
121 EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);
122 verifyAllocHookPtr(P);
123 verifyAllocHookSize(Size);
125 // An update to this warning in Clang now triggers in this line, but it's ok
126 // because the check is expecting a bad pointer and should fail.
127 #if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
128 #pragma GCC diagnostic push
129 #pragma GCC diagnostic ignored "-Wfree-nonheap-object"
130 #endif
131 EXPECT_DEATH(
132 free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
133 #if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
134 #pragma GCC diagnostic pop
135 #endif
137 free(P);
138 verifyDeallocHookPtr(P);
139 EXPECT_DEATH(free(P), "");
141 P = malloc(0U);
142 EXPECT_NE(P, nullptr);
143 free(P);
145 errno = 0;
146 EXPECT_EQ(malloc(SIZE_MAX), nullptr);
147 EXPECT_EQ(errno, ENOMEM);
150 TEST_F(ScudoWrappersCTest, Calloc) {
151 void *P = calloc(1U, Size);
152 EXPECT_NE(P, nullptr);
153 EXPECT_LE(Size, malloc_usable_size(P));
154 verifyAllocHookPtr(P);
155 verifyAllocHookSize(Size);
156 for (size_t I = 0; I < Size; I++)
157 EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
158 free(P);
159 verifyDeallocHookPtr(P);
161 P = calloc(1U, 0U);
162 EXPECT_NE(P, nullptr);
163 free(P);
164 P = calloc(0U, 1U);
165 EXPECT_NE(P, nullptr);
166 free(P);
168 errno = 0;
169 EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
170 EXPECT_EQ(errno, ENOMEM);
171 errno = 0;
172 EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
173 if (SCUDO_ANDROID)
174 EXPECT_EQ(errno, ENOMEM);
175 errno = 0;
176 EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
177 EXPECT_EQ(errno, ENOMEM);
180 TEST_F(ScudoWrappersCTest, SmallAlign) {
181 // Allocating pointers by the powers of 2 from 1 to 0x10000
182 // Using powers of 2 due to memalign using powers of 2 and test more sizes
183 constexpr size_t MaxSize = 0x10000;
184 std::vector<void *> ptrs;
185 // Reserving space to prevent further allocation during the test
186 ptrs.reserve((scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) *
187 (scudo::getLeastSignificantSetBitIndex(MaxSize) + 1) * 3);
188 for (size_t Size = 1; Size <= MaxSize; Size <<= 1) {
189 for (size_t Align = 1; Align <= MaxSize; Align <<= 1) {
190 for (size_t Count = 0; Count < 3; ++Count) {
191 void *P = memalign(Align, Size);
192 EXPECT_TRUE(reinterpret_cast<uintptr_t>(P) % Align == 0);
193 ptrs.push_back(P);
197 for (void *ptr : ptrs)
198 free(ptr);
201 TEST_F(ScudoWrappersCTest, Memalign) {
202 void *P;
203 for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
204 const size_t Alignment = 1U << I;
206 P = memalign(Alignment, Size);
207 EXPECT_NE(P, nullptr);
208 EXPECT_LE(Size, malloc_usable_size(P));
209 EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
210 verifyAllocHookPtr(P);
211 verifyAllocHookSize(Size);
212 free(P);
213 verifyDeallocHookPtr(P);
215 P = nullptr;
216 EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
217 EXPECT_NE(P, nullptr);
218 EXPECT_LE(Size, malloc_usable_size(P));
219 EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
220 verifyAllocHookPtr(P);
221 verifyAllocHookSize(Size);
222 free(P);
223 verifyDeallocHookPtr(P);
226 EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
227 EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
228 EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);
230 // Android's memalign accepts non power-of-2 alignments, and 0.
231 if (SCUDO_ANDROID) {
232 for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
233 P = memalign(Alignment, 1024U);
234 EXPECT_NE(P, nullptr);
235 verifyAllocHookPtr(P);
236 verifyAllocHookSize(Size);
237 free(P);
238 verifyDeallocHookPtr(P);
243 TEST_F(ScudoWrappersCTest, AlignedAlloc) {
244 const size_t Alignment = 4096U;
245 void *P = aligned_alloc(Alignment, Alignment * 4U);
246 EXPECT_NE(P, nullptr);
247 EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
248 EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
249 verifyAllocHookPtr(P);
250 verifyAllocHookSize(Alignment * 4U);
251 free(P);
252 verifyDeallocHookPtr(P);
254 errno = 0;
255 P = aligned_alloc(Alignment, Size);
256 EXPECT_EQ(P, nullptr);
257 EXPECT_EQ(errno, EINVAL);
260 TEST_F(ScudoWrappersCDeathTest, Realloc) {
261 // realloc(nullptr, N) is malloc(N)
262 void *P = realloc(nullptr, Size);
263 EXPECT_NE(P, nullptr);
264 verifyAllocHookPtr(P);
265 verifyAllocHookSize(Size);
266 free(P);
267 verifyDeallocHookPtr(P);
269 P = malloc(Size);
270 EXPECT_NE(P, nullptr);
271 // realloc(P, 0U) is free(P) and returns nullptr
272 EXPECT_EQ(realloc(P, 0U), nullptr);
273 verifyDeallocHookPtr(P);
275 P = malloc(Size);
276 EXPECT_NE(P, nullptr);
277 EXPECT_LE(Size, malloc_usable_size(P));
278 memset(P, 0x42, Size);
280 invalidateAllocHookPtrAs(reinterpret_cast<void *>(0xdeadbeef));
281 void *OldP = P;
282 P = realloc(P, Size * 2U);
283 EXPECT_NE(P, nullptr);
284 EXPECT_LE(Size * 2U, malloc_usable_size(P));
285 for (size_t I = 0; I < Size; I++)
286 EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
287 if (OldP == P) {
288 verifyAllocHookPtr(reinterpret_cast<void *>(0xdeadbeef));
289 } else {
290 verifyAllocHookPtr(P);
291 verifyAllocHookSize(Size * 2U);
292 verifyDeallocHookPtr(OldP);
295 invalidateAllocHookPtrAs(reinterpret_cast<void *>(0xdeadbeef));
296 OldP = P;
297 P = realloc(P, Size / 2U);
298 EXPECT_NE(P, nullptr);
299 EXPECT_LE(Size / 2U, malloc_usable_size(P));
300 for (size_t I = 0; I < Size / 2U; I++)
301 EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
302 if (OldP == P) {
303 verifyAllocHookPtr(reinterpret_cast<void *>(0xdeadbeef));
304 } else {
305 verifyAllocHookPtr(P);
306 verifyAllocHookSize(Size / 2U);
308 free(P);
310 EXPECT_DEATH(P = realloc(P, Size), "");
312 errno = 0;
313 EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
314 EXPECT_EQ(errno, ENOMEM);
315 P = malloc(Size);
316 EXPECT_NE(P, nullptr);
317 errno = 0;
318 EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
319 EXPECT_EQ(errno, ENOMEM);
320 free(P);
322 // Android allows realloc of memalign pointers.
323 if (SCUDO_ANDROID) {
324 const size_t Alignment = 1024U;
325 P = memalign(Alignment, Size);
326 EXPECT_NE(P, nullptr);
327 EXPECT_LE(Size, malloc_usable_size(P));
328 EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
329 memset(P, 0x42, Size);
331 P = realloc(P, Size * 2U);
332 EXPECT_NE(P, nullptr);
333 EXPECT_LE(Size * 2U, malloc_usable_size(P));
334 for (size_t I = 0; I < Size; I++)
335 EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
336 free(P);
340 #if !SCUDO_FUCHSIA
341 TEST_F(ScudoWrappersCTest, MallOpt) {
342 errno = 0;
343 EXPECT_EQ(mallopt(-1000, 1), 0);
344 // mallopt doesn't set errno.
345 EXPECT_EQ(errno, 0);
347 EXPECT_EQ(mallopt(M_PURGE, 0), 1);
349 EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
350 EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
351 EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
352 EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
354 if (SCUDO_ANDROID) {
355 EXPECT_EQ(mallopt(M_CACHE_COUNT_MAX, 100), 1);
356 EXPECT_EQ(mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2), 1);
357 EXPECT_EQ(mallopt(M_TSDS_COUNT_MAX, 10), 1);
360 #endif
362 TEST_F(ScudoWrappersCTest, OtherAlloc) {
363 #if HAVE_PVALLOC
364 const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
366 void *P = pvalloc(Size);
367 EXPECT_NE(P, nullptr);
368 EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
369 EXPECT_LE(PageSize, malloc_usable_size(P));
370 verifyAllocHookPtr(P);
371 // Size will be rounded up to PageSize.
372 verifyAllocHookSize(PageSize);
373 free(P);
374 verifyDeallocHookPtr(P);
376 EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);
378 P = pvalloc(Size);
379 EXPECT_NE(P, nullptr);
380 EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
381 free(P);
382 #endif
384 #if HAVE_VALLOC
385 EXPECT_EQ(valloc(SIZE_MAX), nullptr);
386 #endif
389 template<typename FieldType>
390 void MallInfoTest() {
391 // mallinfo is deprecated.
392 #pragma clang diagnostic push
393 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
394 const FieldType BypassQuarantineSize = 1024U;
395 struct mallinfo MI = mallinfo();
396 FieldType Allocated = MI.uordblks;
397 void *P = malloc(BypassQuarantineSize);
398 EXPECT_NE(P, nullptr);
399 MI = mallinfo();
400 EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
401 EXPECT_GT(MI.hblkhd, static_cast<FieldType>(0));
402 FieldType Free = MI.fordblks;
403 free(P);
404 MI = mallinfo();
405 EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
406 #pragma clang diagnostic pop
409 #if !SCUDO_FUCHSIA
410 TEST_F(ScudoWrappersCTest, MallInfo) {
411 #if SCUDO_ANDROID
412 // Android accidentally set the fields to size_t instead of int.
413 MallInfoTest<size_t>();
414 #else
415 MallInfoTest<int>();
416 #endif
418 #endif
420 #if __GLIBC_PREREQ(2, 33) || SCUDO_ANDROID
421 TEST_F(ScudoWrappersCTest, MallInfo2) {
422 const size_t BypassQuarantineSize = 1024U;
423 struct mallinfo2 MI = mallinfo2();
424 size_t Allocated = MI.uordblks;
425 void *P = malloc(BypassQuarantineSize);
426 EXPECT_NE(P, nullptr);
427 MI = mallinfo2();
428 EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
429 EXPECT_GT(MI.hblkhd, 0U);
430 size_t Free = MI.fordblks;
431 free(P);
432 MI = mallinfo2();
433 EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
435 #endif
437 static uintptr_t BoundaryP;
438 static size_t Count;
440 static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) {
441 if (scudo::archSupportsMemoryTagging()) {
442 Base = scudo::untagPointer(Base);
443 BoundaryP = scudo::untagPointer(BoundaryP);
445 if (Base == BoundaryP)
446 Count++;
449 // Verify that a block located on an iteration boundary is not mis-accounted.
450 // To achieve this, we allocate a chunk for which the backing block will be
451 // aligned on a page, then run the malloc_iterate on both the pages that the
452 // block is a boundary for. It must only be seen once by the callback function.
453 TEST_F(ScudoWrappersCTest, MallocIterateBoundary) {
454 const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
455 #if SCUDO_ANDROID
456 // Android uses a 16 byte alignment for both 32 bit and 64 bit.
457 const size_t BlockDelta = 16U;
458 #else
459 const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
460 #endif
461 const size_t SpecialSize = PageSize - BlockDelta;
463 // We aren't guaranteed that any size class is exactly a page wide. So we need
464 // to keep making allocations until we get an allocation that starts exactly
465 // on a page boundary. The BlockDelta value is expected to be the number of
466 // bytes to subtract from a returned pointer to get to the actual start of
467 // the pointer in the size class. In practice, this means BlockDelta should
468 // be set to the minimum alignment in bytes for the allocation.
470 // With a 16-byte block alignment and 4096-byte page size, each allocation has
471 // a probability of (1 - (16/4096)) of failing to meet the alignment
472 // requirements, and the probability of failing 65536 times is
473 // (1 - (16/4096))^65536 < 10^-112. So if we still haven't succeeded after
474 // 65536 tries, give up.
475 uintptr_t Block;
476 void *P = nullptr;
477 for (unsigned I = 0; I != 65536; ++I) {
478 void *PrevP = P;
479 P = malloc(SpecialSize);
480 EXPECT_NE(P, nullptr);
481 *reinterpret_cast<void **>(P) = PrevP;
482 BoundaryP = reinterpret_cast<uintptr_t>(P);
483 Block = BoundaryP - BlockDelta;
484 if ((Block & (PageSize - 1)) == 0U)
485 break;
487 EXPECT_EQ((Block & (PageSize - 1)), 0U);
489 Count = 0U;
490 malloc_disable();
491 malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
492 malloc_iterate(Block, PageSize, callback, nullptr);
493 malloc_enable();
494 EXPECT_EQ(Count, 1U);
496 while (P) {
497 void *NextP = *reinterpret_cast<void **>(P);
498 free(P);
499 P = NextP;
503 // Fuchsia doesn't have alarm, fork or malloc_info.
504 #if !SCUDO_FUCHSIA
505 TEST_F(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
506 // We expect heap operations within a disable/enable scope to deadlock.
507 EXPECT_DEATH(
509 void *P = malloc(Size);
510 EXPECT_NE(P, nullptr);
511 free(P);
512 malloc_disable();
513 alarm(1);
514 P = malloc(Size);
515 malloc_enable();
517 "");
520 TEST_F(ScudoWrappersCTest, MallocInfo) {
521 // Use volatile so that the allocations don't get optimized away.
522 void *volatile P1 = malloc(1234);
523 void *volatile P2 = malloc(4321);
525 char Buffer[16384];
526 FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
527 EXPECT_NE(F, nullptr);
528 errno = 0;
529 EXPECT_EQ(malloc_info(0, F), 0);
530 EXPECT_EQ(errno, 0);
531 fclose(F);
532 EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0);
533 EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
534 EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));
536 free(P1);
537 free(P2);
540 TEST_F(ScudoWrappersCDeathTest, Fork) {
541 void *P;
542 pid_t Pid = fork();
543 EXPECT_GE(Pid, 0) << strerror(errno);
544 if (Pid == 0) {
545 P = malloc(Size);
546 EXPECT_NE(P, nullptr);
547 memset(P, 0x42, Size);
548 free(P);
549 _exit(0);
551 waitpid(Pid, nullptr, 0);
552 P = malloc(Size);
553 EXPECT_NE(P, nullptr);
554 memset(P, 0x42, Size);
555 free(P);
557 // fork should stall if the allocator has been disabled.
558 EXPECT_DEATH(
560 malloc_disable();
561 alarm(1);
562 Pid = fork();
563 EXPECT_GE(Pid, 0);
565 "");
568 static pthread_mutex_t Mutex;
569 static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
570 static bool Ready;
572 static void *enableMalloc(UNUSED void *Unused) {
573 // Initialize the allocator for this thread.
574 void *P = malloc(Size);
575 EXPECT_NE(P, nullptr);
576 memset(P, 0x42, Size);
577 free(P);
579 // Signal the main thread we are ready.
580 pthread_mutex_lock(&Mutex);
581 Ready = true;
582 pthread_cond_signal(&Conditional);
583 pthread_mutex_unlock(&Mutex);
585 // Wait for the malloc_disable & fork, then enable the allocator again.
586 sleep(1);
587 malloc_enable();
589 return nullptr;
592 TEST_F(ScudoWrappersCTest, DisableForkEnable) {
593 pthread_t ThreadId;
594 Ready = false;
595 EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);
597 // Wait for the thread to be warmed up.
598 pthread_mutex_lock(&Mutex);
599 while (!Ready)
600 pthread_cond_wait(&Conditional, &Mutex);
601 pthread_mutex_unlock(&Mutex);
603 // Disable the allocator and fork. fork should succeed after malloc_enable.
604 malloc_disable();
605 pid_t Pid = fork();
606 EXPECT_GE(Pid, 0);
607 if (Pid == 0) {
608 void *P = malloc(Size);
609 EXPECT_NE(P, nullptr);
610 memset(P, 0x42, Size);
611 free(P);
612 _exit(0);
614 waitpid(Pid, nullptr, 0);
615 EXPECT_EQ(pthread_join(ThreadId, 0), 0);
618 #endif // SCUDO_FUCHSIA