//===-- msan_linux.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Linux-, NetBSD- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

#  include <pthread.h>
#  include <stdlib.h>
#  include <sys/personality.h>
#  include <sys/resource.h>
#  include <sys/time.h>

#  include "msan.h"
#  include "msan_allocator.h"
#  include "msan_chained_origin_depot.h"
#  include "msan_report.h"
#  include "msan_thread.h"
#  include "sanitizer_common/sanitizer_common.h"
#  include "sanitizer_common/sanitizer_procmaps.h"
#  include "sanitizer_common/sanitizer_stackdepot.h"
42 void ReportMapRange(const char *descr
, uptr beg
, uptr size
) {
44 uptr end
= beg
+ size
- 1;
45 VPrintf(1, "%s : %p-%p\n", descr
, (void *)beg
, (void *)end
);
49 static bool CheckMemoryRangeAvailability(uptr beg
, uptr size
, bool verbose
) {
51 uptr end
= beg
+ size
- 1;
52 if (!MemoryRangeIsAvailable(beg
, end
)) {
54 Printf("FATAL: MemorySanitizer: Shadow range %p-%p is not available.\n",
55 (void *)beg
, (void *)end
);
62 static bool ProtectMemoryRange(uptr beg
, uptr size
, const char *name
) {
64 void *addr
= MmapFixedNoAccess(beg
, size
, name
);
65 if (beg
== 0 && addr
) {
66 // Depending on the kernel configuration, we may not be able to protect
67 // the page at address zero.
68 uptr gap
= 16 * GetPageSizeCached();
71 addr
= MmapFixedNoAccess(beg
, size
, name
);
73 if ((uptr
)addr
!= beg
) {
74 uptr end
= beg
+ size
- 1;
76 "FATAL: MemorySanitizer: Cannot protect memory range %p-%p (%s).\n",
77 (void *)beg
, (void *)end
, name
);
84 static void CheckMemoryLayoutSanity() {
86 for (unsigned i
= 0; i
< kMemoryLayoutSize
; ++i
) {
87 uptr start
= kMemoryLayout
[i
].start
;
88 uptr end
= kMemoryLayout
[i
].end
;
89 MappingDesc::Type type
= kMemoryLayout
[i
].type
;
91 CHECK_EQ(prev_end
, start
);
92 CHECK(addr_is_type(start
, type
));
93 CHECK(addr_is_type((start
+ end
) / 2, type
));
94 CHECK(addr_is_type(end
- 1, type
));
95 if (type
== MappingDesc::APP
|| type
== MappingDesc::ALLOCATOR
) {
97 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr
)));
98 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr
)));
99 CHECK_EQ(MEM_TO_ORIGIN(addr
), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr
)));
101 addr
= (start
+ end
) / 2;
102 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr
)));
103 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr
)));
104 CHECK_EQ(MEM_TO_ORIGIN(addr
), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr
)));
107 CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr
)));
108 CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr
)));
109 CHECK_EQ(MEM_TO_ORIGIN(addr
), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr
)));
115 static bool InitShadow(bool init_origins
, bool dry_run
) {
116 // Let user know mapping parameters first.
117 VPrintf(1, "__msan_init %p\n", reinterpret_cast<void *>(&__msan_init
));
118 for (unsigned i
= 0; i
< kMemoryLayoutSize
; ++i
)
119 VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout
[i
].name
, kMemoryLayout
[i
].start
,
120 kMemoryLayout
[i
].end
- 1);
122 CheckMemoryLayoutSanity();
124 if (!MEM_IS_APP(&__msan_init
)) {
126 Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
127 reinterpret_cast<void *>(&__msan_init
));
131 const uptr maxVirtualAddress
= GetMaxUserVirtualAddress();
133 for (unsigned i
= 0; i
< kMemoryLayoutSize
; ++i
) {
134 uptr start
= kMemoryLayout
[i
].start
;
135 uptr end
= kMemoryLayout
[i
].end
;
136 uptr size
= end
- start
;
137 MappingDesc::Type type
= kMemoryLayout
[i
].type
;
139 // Check if the segment should be mapped based on platform constraints.
140 if (start
>= maxVirtualAddress
)
143 bool map
= type
== MappingDesc::SHADOW
||
144 (init_origins
&& type
== MappingDesc::ORIGIN
);
145 bool protect
= type
== MappingDesc::INVALID
||
146 (!init_origins
&& type
== MappingDesc::ORIGIN
);
147 CHECK(!(map
&& protect
));
148 if (!map
&& !protect
) {
149 CHECK(type
== MappingDesc::APP
|| type
== MappingDesc::ALLOCATOR
);
151 if (dry_run
&& type
== MappingDesc::ALLOCATOR
&&
152 !CheckMemoryRangeAvailability(start
, size
, !dry_run
))
156 if (dry_run
&& !CheckMemoryRangeAvailability(start
, size
, !dry_run
))
159 !MmapFixedSuperNoReserve(start
, size
, kMemoryLayout
[i
].name
))
161 if (!dry_run
&& common_flags()->use_madv_dontdump
)
162 DontDumpShadowMemory(start
, size
);
165 if (dry_run
&& !CheckMemoryRangeAvailability(start
, size
, !dry_run
))
167 if (!dry_run
&& !ProtectMemoryRange(start
, size
, kMemoryLayout
[i
].name
))
175 bool InitShadowWithReExec(bool init_origins
) {
176 // Start with dry run: check layout is ok, but don't print warnings because
177 // warning messages will cause tests to fail (even if we successfully re-exec
178 // after the warning).
179 bool success
= InitShadow(init_origins
, true);
182 // Perhaps ASLR entropy is too high. If ASLR is enabled, re-exec without it.
183 int old_personality
= personality(0xffffffff);
185 (old_personality
!= -1) && ((old_personality
& ADDR_NO_RANDOMIZE
) == 0);
189 "WARNING: MemorySanitizer: memory layout is incompatible, "
190 "possibly due to high-entropy ASLR.\n"
191 "Re-execing with fixed virtual address space.\n"
192 "N.B. reducing ASLR entropy is preferable.\n");
193 CHECK_NE(personality(old_personality
| ADDR_NO_RANDOMIZE
), -1);
199 // The earlier dry run didn't actually map or protect anything. Run again in
201 return success
&& InitShadow(init_origins
, false);
204 static void MsanAtExit(void) {
205 if (flags()->print_stats
&& (flags()->atexit
|| msan_report_count
> 0))
207 if (msan_report_count
> 0) {
208 ReportAtExitStatistics();
209 if (common_flags()->exitcode
)
210 internal__exit(common_flags()->exitcode
);
214 void InstallAtExitHandler() {
218 // ---------------------- TSD ---------------- {{{1
221 // Thread Static Data cannot be used in early init on NetBSD.
222 // Reuse the MSan TSD API for compatibility with existing code
223 // with an alternative implementation.
225 static void (*tsd_destructor
)(void *tsd
) = nullptr;
228 tsd_key() : key(nullptr) {}
230 CHECK(tsd_destructor
);
232 (*tsd_destructor
)(key
);
237 static thread_local
struct tsd_key key
;
239 void MsanTSDInit(void (*destructor
)(void *tsd
)) {
240 CHECK(!tsd_destructor
);
241 tsd_destructor
= destructor
;
244 MsanThread
*GetCurrentThread() {
245 CHECK(tsd_destructor
);
249 void SetCurrentThread(MsanThread
*tsd
) {
250 CHECK(tsd_destructor
);
256 void MsanTSDDtor(void *tsd
) {
257 CHECK(tsd_destructor
);
258 CHECK_EQ(key
.key
, tsd
);
260 // Make sure that signal handler can not see a stale current thread pointer.
261 atomic_signal_fence(memory_order_seq_cst
);
262 MsanThread::TSDDtor(tsd
);
265 static pthread_key_t tsd_key
;
266 static bool tsd_key_inited
= false;
268 void MsanTSDInit(void (*destructor
)(void *tsd
)) {
269 CHECK(!tsd_key_inited
);
270 tsd_key_inited
= true;
271 CHECK_EQ(0, pthread_key_create(&tsd_key
, destructor
));
274 static THREADLOCAL MsanThread
* msan_current_thread
;
276 MsanThread
*GetCurrentThread() {
277 return msan_current_thread
;
280 void SetCurrentThread(MsanThread
*t
) {
281 // Make sure we do not reset the current MsanThread.
282 CHECK_EQ(0, msan_current_thread
);
283 msan_current_thread
= t
;
284 // Make sure that MsanTSDDtor gets called at the end.
285 CHECK(tsd_key_inited
);
286 pthread_setspecific(tsd_key
, (void *)t
);
289 void MsanTSDDtor(void *tsd
) {
290 MsanThread
*t
= (MsanThread
*)tsd
;
291 if (t
->destructor_iterations_
> 1) {
292 t
->destructor_iterations_
--;
293 CHECK_EQ(0, pthread_setspecific(tsd_key
, tsd
));
296 ScopedBlockSignals
block(nullptr);
297 msan_current_thread
= nullptr;
298 // Make sure that signal handler can not see a stale current thread pointer.
299 atomic_signal_fence(memory_order_seq_cst
);
300 MsanThread::TSDDtor(tsd
);
304 static void BeforeFork() {
305 VReport(2, "BeforeFork tid: %llu\n", GetTid());
306 // Usually we lock ThreadRegistry, but msan does not have one.
308 StackDepotLockBeforeFork();
309 ChainedOriginDepotBeforeFork();
312 static void AfterFork(bool fork_child
) {
313 ChainedOriginDepotAfterFork(fork_child
);
314 StackDepotUnlockAfterFork(fork_child
);
316 // Usually we unlock ThreadRegistry, but msan does not have one.
317 VReport(2, "AfterFork tid: %llu\n", GetTid());
320 void InstallAtForkHandler() {
322 &BeforeFork
, []() { AfterFork(/* fork_child= */ false); },
323 []() { AfterFork(/* fork_child= */ true); });
326 } // namespace __msan
328 #endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD