//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#  include <sys/prctl.h>
#  include <sys/resource.h>
#  include <sys/time.h>

#  include "hwasan_dynamic_shadow.h"
#  include "hwasan_interface_internal.h"
#  include "hwasan_mapping.h"
#  include "hwasan_report.h"
#  include "hwasan_thread.h"
#  include "hwasan_thread_list.h"
#  include "sanitizer_common/sanitizer_common.h"
#  include "sanitizer_common/sanitizer_procmaps.h"
#  include "sanitizer_common/sanitizer_stackdepot.h"
// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
//   Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
//   Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
//   Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
//   Tested with check-hwasan on aarch64-linux-android.
#  if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#  endif

namespace __hwasan {
// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;

static void ProtectGap(uptr addr, uptr size) {
  __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
                          kZeroBaseMaxShadowStart);
}

static void PrintRange(uptr start, uptr end, const char *name) {
  Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}

static void PrintAddressSpaceLayout() {
  PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
  if (kHighShadowEnd + 1 < kHighMemStart)
    PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
  else
    CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
  PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
  if (kLowShadowEnd + 1 < kHighShadowStart)
    PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
  PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
  if (kLowMemEnd + 1 < kLowShadowStart)
    PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
  else
    CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
  PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
  CHECK_EQ(0, kLowMemStart);
}

static uptr GetHighMemEnd() {
  // HighMem covers the upper part of the address space.
  uptr max_address = GetMaxUserVirtualAddress();
  // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
  // properly aligned:
  max_address |= (GetMmapGranularity() << kShadowScale) - 1;
  return max_address;
}
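// Note: OR-ing in (GetMmapGranularity() << kShadowScale) - 1 sets the low bits
// of the returned address to all ones, so kHighMemEnd + 1 is a multiple of the
// mmap granularity scaled by the shadow granule size (64 KiB for 4 KiB pages
// and the default kShadowScale of 4), keeping both the high-memory end and its
// shadow end page-aligned.
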
static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
  // FIXME: Android should init flags before shadow.
  if (!SANITIZER_ANDROID && flags()->fixed_shadow_base != (uptr)-1) {
    __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
    uptr beg = __hwasan_shadow_memory_dynamic_address;
    uptr end = beg + shadow_size_bytes;
    if (!MemoryRangeIsAvailable(beg, end)) {
      Report(
          "FATAL: HWAddressSanitizer: Shadow range %p-%p is not available.\n",
          (void *)beg, (void *)end);
      CHECK(MemoryRangeIsAvailable(beg, end));
    }
  } else {
    __hwasan_shadow_memory_dynamic_address =
        FindDynamicShadowStart(shadow_size_bytes);
  }
}

static void MaybeDieIfNoTaggingAbi(const char *message) {
  if (!flags()->fail_without_syscall_abi)
    return;
  Printf("FATAL: %s\n", message);
  Die();
}

#  define PR_SET_TAGGED_ADDR_CTRL 55
#  define PR_GET_TAGGED_ADDR_CTRL 56
#  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#  define ARCH_GET_UNTAG_MASK 0x4001
#  define ARCH_ENABLE_TAGGED_ADDR 0x4002
#  define ARCH_GET_MAX_TAG_BITS 0x4003
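// Note: these prctl()/arch_prctl() constants are defined here rather than
// taken from system headers, which may be too old to provide them. The PR_*
// values match the Linux uapi definitions; the ARCH_* values follow the
// proposed LAM arch_prctl() interface referenced in the comment below.
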
static bool CanUseTaggingAbi() {
#  if defined(__x86_64__)
  unsigned long num_bits = 0;
  // Check for x86 LAM support. This API is based on a currently unsubmitted
  // patch to the Linux kernel (as of August 2022) and is thus subject to
  // change. The patch is here:
  // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
  //
  // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
  // bits the user can request, or zero if LAM is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
                                           reinterpret_cast<uptr>(&num_bits))))
    return false;
  // The platform must provide enough bits for HWASan tags.
  if (num_bits < kTagBits)
    return false;
  return true;
#  else
  // Check for ARM TBI support.
  return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
#  endif  // __x86_64__
}

static bool EnableTaggingAbi() {
#  if defined(__x86_64__)
  // Enable x86 LAM tagging for the process.
  //
  // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
  // tag bits requested by the user does not exceed that provided by the system.
  // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
  // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
  // is not supported by the hardware.
  if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
    return false;
  unsigned long mask = 0;
  // Make sure the tag bits are where we expect them to be.
  if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
                                           reinterpret_cast<uptr>(&mask))))
    return false;
  // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
  // bits. Therefore these masks must not overlap.
  if (mask & kAddressTagMask)
    return false;
  return true;
#  else
  // Enable ARM TBI tagging for the process. If for some reason tagging is not
  // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
  // -EINVAL.
  if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
                                      PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
    return false;
  // Ensure that TBI is enabled.
  if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
      PR_TAGGED_ADDR_ENABLE)
    return false;
  return true;
#  endif  // __x86_64__
}

void InitializeOsSupport() {
  // Check we're running on a kernel that can use the tagged address ABI.
  bool has_abi = CanUseTaggingAbi();

  if (!has_abi) {
#  if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
    // Some older Android kernels have the tagged pointer ABI on
    // unconditionally, and hence don't have the tagged-addr prctl while still
    // allowing the ABI.
    // If targeting Android and the prctl is not around we assume this is the
    // case.
    return;
#  else
    MaybeDieIfNoTaggingAbi(
        "HWAddressSanitizer requires a kernel with tagged address ABI.");
#  endif
  }

  if (EnableTaggingAbi())
    return;

#  if SANITIZER_ANDROID
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
      "Check the `sysctl abi.tagged_addr_disabled` configuration.");
#  else
  MaybeDieIfNoTaggingAbi(
      "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
#  endif
}

void InitShadow() {
  // Define the entire memory range.
  kHighMemEnd = GetHighMemEnd();

  // Determine shadow memory base offset.
  InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));
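  // Note: with the usual HWASan mapping, MemToShadow(addr) is
  // (addr >> kShadowScale) + __hwasan_shadow_memory_dynamic_address, i.e. one
  // shadow byte per 2^kShadowScale (16 by default) bytes of application memory,
  // so the shadow reserved here covers roughly 1/16 of the address range.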

  // Place the low memory first.
  kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;

  // Define the low shadow based on the already placed low memory.
  kLowShadowEnd = MemToShadow(kLowMemEnd);
  kLowShadowStart = __hwasan_shadow_memory_dynamic_address;

  // High shadow takes whatever memory is left up there (making sure it is not
  // interfering with low memory in the fixed case).
  kHighShadowEnd = MemToShadow(kHighMemEnd);
  kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;

  // High memory starts where allocated shadow allows.
  kHighMemStart = ShadowToMem(kHighShadowStart);

  // Check the sanity of the defined memory ranges (there might be gaps).
  CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
  CHECK_GT(kHighMemStart, kHighShadowEnd);
  CHECK_GT(kHighShadowEnd, kHighShadowStart);
  CHECK_GT(kHighShadowStart, kLowMemEnd);
  CHECK_GT(kLowMemEnd, kLowMemStart);
  CHECK_GT(kLowShadowEnd, kLowShadowStart);
  CHECK_GT(kLowShadowStart, kLowMemEnd);

  // Reserve shadow memory.
  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");

  // Protect all the gaps.
  ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
  if (kLowMemEnd + 1 < kLowShadowStart)
    ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
  if (kLowShadowEnd + 1 < kHighShadowStart)
    ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
  if (kHighShadowEnd + 1 < kHighMemStart)
    ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);

  PrintAddressSpaceLayout();
}

void InitThreads() {
  CHECK(__hwasan_shadow_memory_dynamic_address);
  uptr guard_page_size = GetMmapGranularity();
  uptr thread_space_start =
      __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
  uptr thread_space_end =
      __hwasan_shadow_memory_dynamic_address - guard_page_size;
  ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
                           "hwasan threads", /*madvise_shadow*/ false);
  ProtectGap(thread_space_end,
             __hwasan_shadow_memory_dynamic_address - thread_space_end);
  InitThreadList(thread_space_start, thread_space_end - thread_space_start);
  hwasanThreadList().CreateCurrentThread();
}
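// Note: the per-thread state is thus placed in a block directly below the
// shadow base, separated from __hwasan_shadow_memory_dynamic_address by a
// guard-page-sized gap that ProtectGap keeps inaccessible.
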
bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
#  if !defined(HWASAN_ALIASING_MODE)
  CHECK_EQ(GetTagFromPointer(p), 0);
#  endif

  return (p >= kHighMemStart && p <= kHighMemEnd) ||
         (p >= kLowMemStart && p <= kLowMemEnd);
}

void InstallAtExitHandler() { atexit(HwasanAtExit); }

// ---------------------- TSD ---------------- {{{1

#  if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void HwasanTSDThreadInit() {
  if (tsd_key_inited)
    CHECK_EQ(0, pthread_setspecific(tsd_key,
                                    (void *)GetPthreadDestructorIterations()));
}

void HwasanTSDDtor(void *tsd) {
  uptr iterations = (uptr)tsd;
  if (iterations > 1) {
    CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
    return;
  }
  __hwasan_thread_exit();
}
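// Note: the TSD value acts as a countdown over pthread destructor iterations;
// re-arming the key until the count reaches 1 postpones __hwasan_thread_exit
// to the last iteration, after other keys' destructors have run.
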
void HwasanTSDInit() {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#  else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#  endif

#  if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
#  else
uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
#  endif

#  if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
  uptr kMagicValue = 0x010203040A0B0C0D;
  uptr *tls_ptr = GetCurrentThreadLongPtr();
  uptr old_value = *tls_ptr;
  *tls_ptr = kMagicValue;
  dlerror();
  if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
    Printf(
        "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
        "for dlerror().\n");
    Die();
  }
  *tls_ptr = old_value;
}
#  else
void AndroidTestTlsSlot() {}
#  endif

static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
  // Access type is passed in a platform dependent way (see below) and encoded
  // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
  // recoverable. Valid values of Y are 0 to 4, which are interpreted as
  // log2(access_size), and 0xF, which means that access size is passed via
  // platform dependent register (see below).
#  if defined(__aarch64__)
  // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
  // access size is stored in X1 register. Access address is always in X0
  // register.
  uptr pc = (uptr)info->si_addr;
  const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
  if ((code & 0xff00) != 0x900)
    return AccessInfo{};  // Not ours.

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.regs[0];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
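  // Example: a BRK immediate of 0x912 (0x900 + 0x12) decodes here as a
  // non-recoverable 4-byte store, with the faulting address taken from X0.
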
#  elif defined(__x86_64__)
  // Access type is encoded in the instruction following INT3 as
  // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // RSI register. Access address is always in RDI register.
  uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
  uint8_t *nop = (uint8_t *)pc;
  if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
      *(nop + 3) < 0x40)
    return AccessInfo{};  // Not ours.
  const unsigned code = *(nop + 3);

  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not ours.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
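  // Example: a trailing byte of 0x52 (0x40 + 0x12) decodes here as a
  // non-recoverable 4-byte store, with the faulting address taken from RDI.
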
#  elif SANITIZER_RISCV64
  // Access type is encoded in the instruction following EBREAK as
  // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
  // X11 register. Access address is always in X10 register.
  uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
  uint8_t byte1 = *((u8 *)(pc + 0));
  uint8_t byte2 = *((u8 *)(pc + 1));
  uint8_t byte3 = *((u8 *)(pc + 2));
  uint8_t byte4 = *((u8 *)(pc + 3));
  uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
  bool isEbreak = (ebreak == 0x100073);
  bool isShortEbreak = false;
#    if defined(__riscv_compressed)
  isFaultShort = ((ebreak & 0x3) != 0x3);
  isShortEbreak = ((ebreak & 0xffff) == 0x9002);
#    endif
  // The faulting insn is not an ebreak; not our case.
  if (!(isEbreak || isShortEbreak))
    return AccessInfo{};
  // Advance pc to point past the ebreak and reconstruct the ADDI instruction.
  pc += isFaultShort ? 2 : 4;
  byte1 = *((u8 *)(pc + 0));
  byte2 = *((u8 *)(pc + 1));
  byte3 = *((u8 *)(pc + 2));
  byte4 = *((u8 *)(pc + 3));
  // Reconstruct the instruction.
  uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  // Check that this is really a 32-bit instruction; the code is encoded in the
  // top 12 bits, since the instruction is expected to be an ADDI with an
  // immediate.
  const unsigned code = (instr >> 20) & 0xffff;
  const uptr addr = uc->uc_mcontext.__gregs[10];
  const bool is_store = code & 0x10;
  const bool recover = code & 0x20;
  const unsigned size_log = code & 0xf;
  if (size_log > 4 && size_log != 0xf)
    return AccessInfo{};  // Not our case.
  const uptr size =
      size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;
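  // Example: an ADDI immediate of 0x52 (0x40 + 0x12) decodes here as a
  // non-recoverable 4-byte store, with the faulting address taken from X10.
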
#  else
#    error Unsupported architecture
#  endif

  return AccessInfo{addr, size, is_store, !is_store, recover};
}

static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
  AccessInfo ai = GetAccessInfo(info, uc);
  if (!ai.is_store && !ai.is_load)
    return false;

  SignalContext sig{info, uc};
  HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);

#  if defined(__aarch64__)
  uc->uc_mcontext.pc += 4;
#  elif defined(__x86_64__)
#  elif SANITIZER_RISCV64
  // pc points to the EBREAK, which is 2 bytes (c.ebreak) or 4 bytes long.
  uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
  uint8_t byte1 = (uint8_t)(*(exception_source + 0));
  uint8_t byte2 = (uint8_t)(*(exception_source + 1));
  uint8_t byte3 = (uint8_t)(*(exception_source + 2));
  uint8_t byte4 = (uint8_t)(*(exception_source + 3));
  uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
  bool isFaultShort = false;
#    if defined(__riscv_compressed)
  isFaultShort = ((faulted & 0x3) != 0x3);
#    endif
  uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
#  else
#    error Unsupported architecture
#  endif
  return true;
}

static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

void HwasanOnDeadlySignal(int signo, void *info, void *context) {
  // Probably a tag mismatch.
  if (signo == SIGTRAP)
    if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
      return;

  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}

void Thread::InitStackAndTls(const InitState *) {
  GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_top_, &tls_begin_,
                       &tls_end_);
}

uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
  CHECK(IsAligned(p, kShadowAlignment));
  CHECK(IsAligned(size, kShadowAlignment));
  uptr shadow_start = MemToShadow(p);
  uptr shadow_size = MemToShadowSize(size);

  uptr page_size = GetPageSizeCached();
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  uptr threshold = common_flags()->clear_shadow_mmap_threshold;
  if (SANITIZER_LINUX &&
      UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
    internal_memset((void *)shadow_start, tag, page_start - shadow_start);
    internal_memset((void *)page_end, tag,
                    shadow_start + shadow_size - page_end);
    // For an anonymous private mapping MADV_DONTNEED will return a zero page on
    // Linux.
    ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
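    // In effect only the unaligned head and tail of the shadow range are
    // memset explicitly; the page-aligned middle is released to the OS and
    // refilled with zero pages on demand, which is typically cheaper than a
    // large memset when clearing tags.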
  } else {
    internal_memset((void *)shadow_start, tag, shadow_size);
  }
  return AddTagToPointer(p, tag);
}

static void BeforeFork() {
  VReport(2, "BeforeFork tid: %llu\n", GetTid());
  if (CAN_SANITIZE_LEAKS) {
    __lsan::LockGlobal();
  }
  // `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and lock
  // the stuff we need.
  __lsan::LockThreads();
  __lsan::LockAllocator();
  StackDepotLockBeforeFork();
}

static void AfterFork(bool fork_child) {
  StackDepotUnlockAfterFork(fork_child);
  // `_lsan` functions are defined regardless of `CAN_SANITIZE_LEAKS` and unlock
  // the stuff we need.
  __lsan::UnlockAllocator();
  __lsan::UnlockThreads();
  if (CAN_SANITIZE_LEAKS) {
    __lsan::UnlockGlobal();
  }
  VReport(2, "AfterFork tid: %llu\n", GetTid());
}

void HwasanInstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

void InstallAtExitCheckLeaks() {
  if (CAN_SANITIZE_LEAKS) {
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      if (flags()->halt_on_error)
        Atexit(__lsan::DoLeakCheck);
      else
        Atexit(__lsan::DoRecoverableLeakCheckVoid);
    }
  }
}

}  // namespace __hwasan

using namespace __hwasan;

extern "C" void __hwasan_thread_enter() {
  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
}

extern "C" void __hwasan_thread_exit() {
  Thread *t = GetCurrentThread();
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  if (t) {
    // Block async signals on the thread as the handler can be instrumented.
    // After this point instrumented code can't access essential data from TLS
    // and will crash.
    // Bionic already calls __hwasan_thread_exit with blocked signals.
    if (SANITIZER_GLIBC)
      BlockSignals();
    hwasanThreadList().ReleaseThread(t);
  }
}

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD