//===-- msan_linux.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Linux-, NetBSD- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"

#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#  include <elf.h>
#  include <link.h>
#  include <pthread.h>
#  include <signal.h>
#  include <stdio.h>
#  include <stdlib.h>
#  if SANITIZER_LINUX
#    include <sys/personality.h>
#  endif
#  include <sys/resource.h>
#  include <sys/time.h>
#  include <unistd.h>
#  include <unwind.h>

#  include "msan.h"
#  include "msan_allocator.h"
#  include "msan_chained_origin_depot.h"
#  include "msan_report.h"
#  include "msan_thread.h"
#  include "sanitizer_common/sanitizer_common.h"
#  include "sanitizer_common/sanitizer_procmaps.h"
#  include "sanitizer_common/sanitizer_stackdepot.h"
40 namespace __msan {
42 void ReportMapRange(const char *descr, uptr beg, uptr size) {
43 if (size > 0) {
44 uptr end = beg + size - 1;
45 VPrintf(1, "%s : %p-%p\n", descr, (void *)beg, (void *)end);
49 static bool CheckMemoryRangeAvailability(uptr beg, uptr size, bool verbose) {
50 if (size > 0) {
51 uptr end = beg + size - 1;
52 if (!MemoryRangeIsAvailable(beg, end)) {
53 if (verbose)
54 Printf("FATAL: MemorySanitizer: Shadow range %p-%p is not available.\n",
55 (void *)beg, (void *)end);
56 return false;
59 return true;
62 static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
63 if (size > 0) {
64 void *addr = MmapFixedNoAccess(beg, size, name);
65 if (beg == 0 && addr) {
66 // Depending on the kernel configuration, we may not be able to protect
67 // the page at address zero.
68 uptr gap = 16 * GetPageSizeCached();
69 beg += gap;
70 size -= gap;
71 addr = MmapFixedNoAccess(beg, size, name);
73 if ((uptr)addr != beg) {
74 uptr end = beg + size - 1;
75 Printf(
76 "FATAL: MemorySanitizer: Cannot protect memory range %p-%p (%s).\n",
77 (void *)beg, (void *)end, name);
78 return false;
81 return true;
84 static void CheckMemoryLayoutSanity() {
85 uptr prev_end = 0;
86 for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
87 uptr start = kMemoryLayout[i].start;
88 uptr end = kMemoryLayout[i].end;
89 MappingDesc::Type type = kMemoryLayout[i].type;
90 CHECK_LT(start, end);
91 CHECK_EQ(prev_end, start);
92 CHECK(addr_is_type(start, type));
93 CHECK(addr_is_type((start + end) / 2, type));
94 CHECK(addr_is_type(end - 1, type));
95 if (type == MappingDesc::APP || type == MappingDesc::ALLOCATOR) {
96 uptr addr = start;
97 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
98 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
99 CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
101 addr = (start + end) / 2;
102 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
103 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
104 CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
106 addr = end - 1;
107 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
108 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
109 CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
111 prev_end = end;
115 static bool InitShadow(bool init_origins, bool dry_run) {
116 // Let user know mapping parameters first.
117 VPrintf(1, "__msan_init %p\n", reinterpret_cast<void *>(&__msan_init));
118 for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
119 VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
120 kMemoryLayout[i].end - 1);
122 CheckMemoryLayoutSanity();
124 if (!MEM_IS_APP(&__msan_init)) {
125 if (!dry_run)
126 Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
127 reinterpret_cast<void *>(&__msan_init));
128 return false;
131 const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
133 for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
134 uptr start = kMemoryLayout[i].start;
135 uptr end = kMemoryLayout[i].end;
136 uptr size = end - start;
137 MappingDesc::Type type = kMemoryLayout[i].type;
139 // Check if the segment should be mapped based on platform constraints.
140 if (start >= maxVirtualAddress)
141 continue;
143 bool map = type == MappingDesc::SHADOW ||
144 (init_origins && type == MappingDesc::ORIGIN);
145 bool protect = type == MappingDesc::INVALID ||
146 (!init_origins && type == MappingDesc::ORIGIN);
147 CHECK(!(map && protect));
148 if (!map && !protect) {
149 CHECK(type == MappingDesc::APP || type == MappingDesc::ALLOCATOR);
151 if (dry_run && type == MappingDesc::ALLOCATOR &&
152 !CheckMemoryRangeAvailability(start, size, !dry_run))
153 return false;
155 if (map) {
156 if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
157 return false;
158 if (!dry_run &&
159 !MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
160 return false;
161 if (!dry_run && common_flags()->use_madv_dontdump)
162 DontDumpShadowMemory(start, size);
164 if (protect) {
165 if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
166 return false;
167 if (!dry_run && !ProtectMemoryRange(start, size, kMemoryLayout[i].name))
168 return false;
172 return true;
175 bool InitShadowWithReExec(bool init_origins) {
176 // Start with dry run: check layout is ok, but don't print warnings because
177 // warning messages will cause tests to fail (even if we successfully re-exec
178 // after the warning).
179 bool success = InitShadow(init_origins, true);
180 if (!success) {
181 # if SANITIZER_LINUX
182 // Perhaps ASLR entropy is too high. If ASLR is enabled, re-exec without it.
183 int old_personality = personality(0xffffffff);
184 bool aslr_on =
185 (old_personality != -1) && ((old_personality & ADDR_NO_RANDOMIZE) == 0);
187 if (aslr_on) {
188 VReport(1,
189 "WARNING: MemorySanitizer: memory layout is incompatible, "
190 "possibly due to high-entropy ASLR.\n"
191 "Re-execing with fixed virtual address space.\n"
192 "N.B. reducing ASLR entropy is preferable.\n");
193 CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
194 ReExec();
196 # endif
199 // The earlier dry run didn't actually map or protect anything. Run again in
200 // non-dry run mode.
201 return success && InitShadow(init_origins, false);
204 static void MsanAtExit(void) {
205 if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
206 ReportStats();
207 if (msan_report_count > 0) {
208 ReportAtExitStatistics();
209 if (common_flags()->exitcode)
210 internal__exit(common_flags()->exitcode);
214 void InstallAtExitHandler() {
215 atexit(MsanAtExit);
218 // ---------------------- TSD ---------------- {{{1
220 #if SANITIZER_NETBSD
221 // Thread Static Data cannot be used in early init on NetBSD.
222 // Reuse the MSan TSD API for compatibility with existing code
223 // with an alternative implementation.
225 static void (*tsd_destructor)(void *tsd) = nullptr;
227 struct tsd_key {
228 tsd_key() : key(nullptr) {}
229 ~tsd_key() {
230 CHECK(tsd_destructor);
231 if (key)
232 (*tsd_destructor)(key);
234 MsanThread *key;
237 static thread_local struct tsd_key key;
239 void MsanTSDInit(void (*destructor)(void *tsd)) {
240 CHECK(!tsd_destructor);
241 tsd_destructor = destructor;
244 MsanThread *GetCurrentThread() {
245 CHECK(tsd_destructor);
246 return key.key;
249 void SetCurrentThread(MsanThread *tsd) {
250 CHECK(tsd_destructor);
251 CHECK(tsd);
252 CHECK(!key.key);
253 key.key = tsd;
256 void MsanTSDDtor(void *tsd) {
257 CHECK(tsd_destructor);
258 CHECK_EQ(key.key, tsd);
259 key.key = nullptr;
260 // Make sure that signal handler can not see a stale current thread pointer.
261 atomic_signal_fence(memory_order_seq_cst);
262 MsanThread::TSDDtor(tsd);
264 #else
265 static pthread_key_t tsd_key;
266 static bool tsd_key_inited = false;
268 void MsanTSDInit(void (*destructor)(void *tsd)) {
269 CHECK(!tsd_key_inited);
270 tsd_key_inited = true;
271 CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
274 static THREADLOCAL MsanThread* msan_current_thread;
276 MsanThread *GetCurrentThread() {
277 return msan_current_thread;
280 void SetCurrentThread(MsanThread *t) {
281 // Make sure we do not reset the current MsanThread.
282 CHECK_EQ(0, msan_current_thread);
283 msan_current_thread = t;
284 // Make sure that MsanTSDDtor gets called at the end.
285 CHECK(tsd_key_inited);
286 pthread_setspecific(tsd_key, (void *)t);
289 void MsanTSDDtor(void *tsd) {
290 MsanThread *t = (MsanThread*)tsd;
291 if (t->destructor_iterations_ > 1) {
292 t->destructor_iterations_--;
293 CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
294 return;
296 ScopedBlockSignals block(nullptr);
297 msan_current_thread = nullptr;
298 // Make sure that signal handler can not see a stale current thread pointer.
299 atomic_signal_fence(memory_order_seq_cst);
300 MsanThread::TSDDtor(tsd);
302 # endif
304 static void BeforeFork() {
305 VReport(2, "BeforeFork tid: %llu\n", GetTid());
306 // Usually we lock ThreadRegistry, but msan does not have one.
307 LockAllocator();
308 StackDepotLockBeforeFork();
309 ChainedOriginDepotBeforeFork();
312 static void AfterFork(bool fork_child) {
313 ChainedOriginDepotAfterFork(fork_child);
314 StackDepotUnlockAfterFork(fork_child);
315 UnlockAllocator();
316 // Usually we unlock ThreadRegistry, but msan does not have one.
317 VReport(2, "AfterFork tid: %llu\n", GetTid());
320 void InstallAtForkHandler() {
321 pthread_atfork(
322 &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
323 []() { AfterFork(/* fork_child= */ true); });
326 } // namespace __msan
328 #endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD