//===-- tsan_mman.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so it must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    uptr p_meta = (uptr)MemToMeta(p);
    ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
  }
};

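// Worked example of the page trimming in OnUnmap above (illustrative numbers;
// the real kMetaShadowCell/kMetaShadowSize constants live in tsan_platform.h):
// if kMetaShadowCell were 8 and kMetaShadowSize 4, then kMetaRatio == 2 and
// kPageSize == 2 * GetPageSizeCached(). Rounding p up and p + size down to
// kPageSize boundaries keeps only the part of the block whose meta shadow
// covers whole OS pages, so ReleaseMemoryPagesToOS never touches meta shadow
// shared with neighboring blocks.
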
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;
  // This mutex stands in for the internal allocator's own mutexes for the
  // purposes of deadlock detection. The internal allocator uses multiple
  // mutexes, they are locked only occasionally, and they are spin mutexes
  // which don't support deadlock detection, so we use this fake mutex as a
  // substitute for them.
  CheckedMutex internal_alloc_mtx;

  GlobalProc()
      : mtx(MutexTypeGlobalProc),
        proc(ProcCreate()),
        internal_alloc_mtx(MutexTypeInternalAlloc) {}
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

static void InternalAllocAccess() {
  global_proc()->internal_alloc_mtx.Lock();
  global_proc()->internal_alloc_mtx.Unlock();
}

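// Note: the empty Lock()/Unlock() pair above is deliberate. internal_alloc_mtx
// is used purely for deadlock detection (see the GlobalProc comment), so this
// merely records an acquisition of the fake internal-allocator mutex, keeping
// the lock-order graph consistent for the real internal allocator mutexes.
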
ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  // and:
  //   __interceptor_munmap
  //   __deallocate_stack
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->internal_alloc_mtx.Lock();
  InternalAllocatorLock();
}

void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  InternalAllocatorUnlock();
  global_proc()->internal_alloc_mtx.Unlock();
}

void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Lock();
}

void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  global_proc()->mtx.Unlock();
}

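// These Lock/Unlock pairs expose the allocator and global-processor mutexes to
// callers outside this file (presumably so that allocator state can be
// stabilized around operations such as fork()); they carry
// SANITIZER_NO_THREAD_SAFETY_ANALYSIS because acquire and release happen in
// different functions.
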
static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
  max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                     ? common_flags()->max_allocation_size_mb
                                           << 20
                                     : kMaxAllowedMallocSize;
}

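// For example, running with max_allocation_size_mb=2048 caps a single user
// allocation at 2048 << 20 bytes (2 GiB); without the flag the cap stays at
// kMaxAllowedMallocSize (1ull << 40, i.e. 1 TiB).
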
void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !ShouldReport(thr, ReportTypeSignalUnsafe))
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

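// SignalUnsafeCall is invoked from the allocation and deallocation paths below
// to report malloc/free performed inside a signal handler
// (ReportTypeSignalUnsafe), since such calls are not async-signal-safe.
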
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                          bool signal) {
  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize ||
      sz > max_user_defined_malloc_size) {
    if (AllocatorMayReturnNull())
      return nullptr;
    uptr malloc_limit =
        Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportRssLimitExceeded(&stack);
  }
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportOutOfMemory(sz, &stack);
  }
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportCallocOverflow(n, size, &stack);
  }
  void *p = user_alloc_internal(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return SetErrnoOnNull(p);
}

void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
    if (AllocatorMayReturnNull())
      return SetErrnoOnNull(nullptr);
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportReallocArrayOverflow(size, n, &stack);
  }
  return user_realloc(thr, pc, p, size * n);
}

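// CheckForCallocOverflow (from sanitizer_allocator_checks.h) returns true when
// size * n overflows uptr, so both user_calloc and user_reallocarray reject
// the request before computing the product.
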
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
  // Note: this can run before thread initialization/after finalization.
  // As a result this is not necessarily synchronized with DoReset,
  // which iterates over and resets all sync objects,
  // but it is fine to create new MBlocks in this context.
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  // If this runs before thread initialization/after finalization
  // and we don't have the trace initialized, we can't imitate writes.
  // In that case just reset the shadow range; this is fine since
  // it affects only a small fraction of special objects.
  if (write && thr->ignore_reads_and_writes == 0 &&
      atomic_load_relaxed(&thr->trace_pos))
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  if (!thr->slot) {
    // Very early/late in thread lifetime, or during fork.
    UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
    DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
    return;
  }
  SlotLocker locker(thr);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
  DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently;
  // it seems that some software actually does this.
  if (!p)
    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
  if (!sz) {
    user_free(thr, pc, p);
    return nullptr;
  }
  void *new_p = user_alloc_internal(thr, pc, sz);
  if (new_p) {
    uptr old_sz = user_alloc_usable_size(p);
    internal_memcpy(new_p, p, min(old_sz, sz));
    user_free(thr, pc, p);
  }
  return SetErrnoOnNull(new_p);
}

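// This follows the usual realloc contract: realloc(nullptr, sz) acts like
// malloc(sz), realloc(p, 0) frees p and returns null, and if the new
// allocation fails the original block is left intact.
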
void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!IsPowerOfTwo(align))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAllocationAlignment(align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                        uptr sz) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidPosixMemalignAlignment(align, &stack);
  }
  void *ptr = user_alloc_internal(thr, pc, sz, align);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by user_alloc_internal.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, align));
  *memptr = ptr;
  return 0;
}

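// Unlike the other allocation wrappers, posix_memalign reports failure through
// its return value (errno_EINVAL for a bad alignment, errno_ENOMEM when the
// allocation fails) and stores the result via memptr, so it does not go
// through SetErrnoOnNull.
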
void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
  }
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}

void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
}

void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_STACK_TRACE_FATAL(thr, pc);
    ReportPvallocOverflow(sz, &stack);
  }
  // pvalloc(0) should allocate one page.
  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0 || !IsAppMem((uptr)p))
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *Alloc(uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void FreeImpl(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalAllocAccess();
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

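// __tsan_on_thread_idle (below) drains the calling thread's allocator caches
// and notifies the metamap, presumably so that threads that go idle for a long
// time do not pin freed memory in per-processor caches.
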
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"