//===-- tsan_platform_mac.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_APPLE

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_ptrauth.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"

#include <mach/mach.h>
#include <pthread.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/resource.h>

namespace __tsan {

#if !SANITIZER_GO
static char main_thread_state[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
static ThreadState *dead_thread_state;
static pthread_key_t thread_state_key;

// We rely on the following documented, but Darwin-specific behavior to keep the
// reference to the ThreadState object alive in TLS:
// pthread_key_create man page:
//   If, after all the destructors have been called for all non-NULL values with
//   associated destructors, there are still some non-NULL values with
//   associated destructors, then the process is repeated. If, after at least
//   [PTHREAD_DESTRUCTOR_ITERATIONS] iterations of destructor calls for
//   outstanding non-NULL values, there are still some non-NULL values with
//   associated destructors, the implementation stops calling destructors.
static_assert(PTHREAD_DESTRUCTOR_ITERATIONS == 4, "Small number of iterations");
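
// The destructor below re-registers the ThreadState pointer for its key, so
// (per the behavior quoted above) the value stays readable while destructors
// for other TLS keys run during thread teardown.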
static void ThreadStateDestructor(void *thr) {
  int res = pthread_setspecific(thread_state_key, thr);
  CHECK_EQ(res, 0);
}
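
// Sets up per-thread ThreadState storage: creates the pthread key, binds the
// statically allocated state for the main thread, and builds a read-only
// "dead" ThreadState that gets installed for already-finalized threads.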
static void InitializeThreadStateStorage() {
  int res;
  CHECK_EQ(thread_state_key, 0);
  res = pthread_key_create(&thread_state_key, ThreadStateDestructor);
  CHECK_EQ(res, 0);
  res = pthread_setspecific(thread_state_key, main_thread_state);
  CHECK_EQ(res, 0);

  auto dts = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
  dts->fast_state.SetIgnoreBit();
  dts->ignore_interceptors = 1;
  dts->is_dead = true;
  const_cast<Tid &>(dts->tid) = kInvalidTid;
  res = internal_mprotect(dts, sizeof(ThreadState), PROT_READ);  // immutable
  CHECK_EQ(res, 0);
  dead_thread_state = dts;
}
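
// Returns the ThreadState for the current thread, lazily mmap'ing a fresh one
// if TLS does not hold a value yet (e.g. for early interceptor calls on a
// newly created thread).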
ThreadState *cur_thread() {
  // Some interceptors get called before libpthread has been initialized and in
  // these cases we must avoid calling any pthread APIs.
  if (UNLIKELY(!thread_state_key)) {
    return (ThreadState *)main_thread_state;
  }

  // We only reach this line after InitializeThreadStateStorage() ran, i.e.,
  // after TSan (and therefore libpthread) have been initialized.
  ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
  if (UNLIKELY(!thr)) {
    thr = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
    int res = pthread_setspecific(thread_state_key, thr);
    CHECK_EQ(res, 0);
  }
  return thr;
}

void set_cur_thread(ThreadState *thr) {
  int res = pthread_setspecific(thread_state_key, thr);
  CHECK_EQ(res, 0);
}

void cur_thread_finalize() {
  ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
  if (thr == (ThreadState *)main_thread_state) {
    // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
    // exit the main thread. Let's keep the main thread's ThreadState.
    return;
  }
  // Intercepted functions can still get called after cur_thread_finalize()
  // (called from DestroyThreadState()), so put a fake thread state for "dead"
  // threads. An alternative solution would be to release the ThreadState
  // object from THREAD_DESTROY (which is delivered later and on the parent
  // thread) instead of THREAD_TERMINATE.
  int res = pthread_setspecific(thread_state_key, dead_thread_state);
  CHECK_EQ(res, 0);
  UnmapOrDie(thr, sizeof(ThreadState));
}
#endif  // !SANITIZER_GO
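
// Walks [start, end) with vm_region_64() and reports the resident and dirty
// footprint of the range in bytes.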
static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
  vm_address_t address = start;
  vm_address_t end_address = end;
  uptr resident_pages = 0;
  uptr dirty_pages = 0;
  while (address < end_address) {
    vm_size_t vm_region_size;
    mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
    vm_region_extended_info_data_t vm_region_info;
    mach_port_t object_name;
    kern_return_t ret = vm_region_64(
        mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
        (vm_region_info_t)&vm_region_info, &count, &object_name);
    if (ret != KERN_SUCCESS) break;

    resident_pages += vm_region_info.pages_resident;
    dirty_pages += vm_region_info.pages_dirtied;

    address += vm_region_size;
  }
  *res = resident_pages * GetPageSizeCached();
  *dirty = dirty_pages * GetPageSizeCached();
}
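
// Renders a one-shot memory profile (shadow, metadata and application ranges,
// stack depot and thread counts) into |buf|.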
void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
  uptr shadow_res, shadow_dirty;
  uptr meta_res, meta_dirty;
  RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
  RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);

#if !SANITIZER_GO
  uptr low_res, low_dirty;
  uptr high_res, high_dirty;
  uptr heap_res, heap_dirty;
  RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
  RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
  RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
#else  // !SANITIZER_GO
  uptr app_res, app_dirty;
  RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty);
#endif

  StackDepotStats stacks = StackDepotGetStats();
  uptr nthread, nlive;
  ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
  internal_snprintf(
      buf, buf_size,
      "shadow   (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
      "meta     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#if !SANITIZER_GO
      "low app  (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
      "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
      "heap     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#else  // !SANITIZER_GO
      "app      (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#endif
      "stacks: %zd unique IDs, %zd kB allocated\n"
      "threads: %zd total, %zd live\n"
      "------------------------------\n",
      ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
      MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
#if !SANITIZER_GO
      LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
      HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
      HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
#else  // !SANITIZER_GO
      LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024,
#endif
      stacks.n_uniq_ids, stacks.allocated / 1024, nthread, nlive);
}

#if !SANITIZER_GO
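// No extra platform work is needed once the shadow range is mapped on Darwin,
// so this hook is intentionally empty.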
void InitializeShadowMemoryPlatform() { }

// Register GCD worker threads, which are created without an observable call to
// pthread_create().
static void ThreadCreateCallback(uptr thread, bool gcd_worker) {
  if (gcd_worker) {
    ThreadState *thr = cur_thread();
    Processor *proc = ProcCreate();
    ProcWire(proc, thr);
    ThreadState *parent_thread_state = nullptr;  // No parent.
    Tid tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
    CHECK_NE(tid, kMainTid);
    ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
  }
}

// Destroy thread state for *all* threads.
static void ThreadTerminateCallback(uptr thread) {
  ThreadState *thr = cur_thread();
  if (thr->tctx) {
    DestroyThreadState();
  }
}
#endif  // !SANITIZER_GO
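
// On iOS the fixed shadow layout assumes a particular top of the user address
// space; refuse to run if the actual limit does not match HiAppMemEnd().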
void InitializePlatformEarly() {
#if !SANITIZER_GO && SANITIZER_IOS
  uptr max_vm = GetMaxUserVirtualAddress() + 1;
  if (max_vm != HiAppMemEnd()) {
    Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
           (void *)max_vm, (void *)HiAppMemEnd());
    Die();
  }
#endif
}

static uptr longjmp_xor_key = 0;
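
// Sets up per-thread state storage, installs the pthread introspection hook
// used to track thread creation/termination, and captures the key libsystem
// uses to mangle the stack pointer saved by setjmp() (macOS 10.14+).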
void InitializePlatform() {
  DisableCoreDumperIfNecessary();
#if !SANITIZER_GO
  InitializeThreadStateStorage();

  ThreadEventCallbacks callbacks = {
      .create = ThreadCreateCallback,
      .terminate = ThreadTerminateCallback,
  };
  InstallPthreadIntrospectionHook(callbacks);
#endif

  if (GetMacosAlignedVersion() >= MacosVersion(10, 14)) {
    // Libsystem currently uses a process-global key; this might change.
    const unsigned kTLSLongjmpXorKeySlot = 0x7;
    longjmp_xor_key = (uptr)pthread_getspecific(kTLSLongjmpXorKeySlot);
  }
}
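
// Index of the mangled stack pointer within a jmp_buf. The layout differs by
// architecture and, on arm64, by macOS version, so the slot is selected at
// run time there.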
#ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT \
    ((GetMacosAlignedVersion() >= MacosVersion(10, 14)) ? 12 : 13)
#else
# define LONG_JMP_SP_ENV_SLOT 2
#endif

uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  uptr sp = mangled_sp ^ longjmp_xor_key;
  sp = (uptr)ptrauth_auth_data((void *)sp, ptrauth_key_asdb,
                               ptrauth_string_discriminator("sp"));
  return sp;
}

#if !SANITIZER_GO
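// Marker function whose address serves as the fake PC for the imitated TLS
// write below, so reports point at a recognizable symbol.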
extern "C" void __tsan_tls_initialization() {}

void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  const uptr pc = StackTrace::GetNextInstructionPc(
      reinterpret_cast<uptr>(__tsan_tls_initialization));
  // Unlike Linux, we only store a pointer to the ThreadState object in TLS;
  // just mark the entire range as written to.
  MemoryRangeImitateWrite(thr, pc, tls_addr, tls_size);
}
#endif  // !SANITIZER_GO

#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
                                     void (*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are a hardcore macro mess; we can't intercept or
  // call them without including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(arg);
  pthread_cleanup_pop(0);
  return res;
}
#endif  // !SANITIZER_GO

}  // namespace __tsan

#endif  // SANITIZER_APPLE