//===-- msan_allocator.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
14 #include "msan_allocator.h"
17 #include "msan_interface_internal.h"
18 #include "msan_origin.h"
19 #include "msan_poisoning.h"
20 #include "msan_thread.h"
21 #include "sanitizer_common/sanitizer_allocator.h"
22 #include "sanitizer_common/sanitizer_allocator_checks.h"
23 #include "sanitizer_common/sanitizer_allocator_interface.h"
24 #include "sanitizer_common/sanitizer_allocator_report.h"
25 #include "sanitizer_common/sanitizer_errno.h"
using namespace __msan;

namespace __msan {

// Per-chunk metadata; records the exact size the user requested, which may be
// smaller than the chunk the allocator actually hands out.
struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};
// Note: to ensure that the allocator is compatible with the application memory
// layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be
// duplicated as MappingDesc::ALLOCATOR in msan.h.
#if defined(__mips64)
const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = __sanitizer::CompactSizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
};
using PrimaryAllocator = SizeClassAllocator32<AP32>;
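
// The 64-bit ports below differ only in the address range reserved for the
// primary allocator and in the per-allocation size cap; the remaining AP64
// parameters are identical across targets.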
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
const uptr kAllocatorSpace = 0x700000000000ULL;
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
const uptr kMaxAllowedMallocSize = 1ULL << 40;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;
#elif defined(__loongarch_lp64)
const uptr kAllocatorSpace = 0x700000000000ULL;
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;
#elif defined(__powerpc64__)
const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;
#elif defined(__s390x__)
const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;
#elif defined(__aarch64__)
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  using SizeClassMap = DefaultSizeClassMap;
  using MapUnmapCallback = MsanMapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

using PrimaryAllocator = SizeClassAllocator64<AP64>;
#endif

using Allocator = CombinedAllocator<PrimaryAllocator>;
using AllocatorCache = Allocator::AllocatorCache;

} // namespace __msan
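
// A single global allocator instance serves the whole process. Threads
// allocate through the cache embedded in their MsanThreadLocalMallocStorage;
// calls made when no MsanThread is current fall back to a shared cache
// guarded by fallback_mutex.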
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;
void __msan::MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}
void __msan::LockAllocator() { allocator.ForceLock(); }

void __msan::UnlockAllocator() { allocator.ForceUnlock(); }

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::Init() {
  allocator.InitCache(GetAllocatorCache(this));
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
  allocator.DestroyCache(GetAllocatorCache(this));
}
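
// MsanAllocate is the common backend for every user-facing entry point below:
// it enforces the size and RSS limits, obtains memory from the per-thread (or
// fallback) cache, records the requested size in the chunk metadata, and sets
// the initial shadow state: zeroed-and-unpoisoned for calloc-style requests,
// otherwise poisoned (with a TAG_ALLOC heap origin when origin tracking is
// enabled) if poison_in_malloc is set.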
static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (UNLIKELY(size > max_malloc_size)) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportOutOfMemory(size, stack);
  }
  auto *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    if (allocator.FromPrimary(allocated))
      __msan_clear_and_unpoison(allocated, size);
    else
      __msan_unpoison(allocated, size);  // Mem is already zeroed.
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  RunMallocHooks(allocated, size);
  return allocated;
}
void __msan::MsanDeallocate(BufferedStackTrace *stack, void *p) {
  DCHECK(p);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned. The secondary allocator will unmap and unpoison by
  // MsanMapUnmapCallback, no need to poison it here.
  if (flags()->poison_in_free && allocator.FromPrimary(p)) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  if (MsanThread *t = GetCurrentThread()) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}
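
// Realloc shrinks or grows in place when the new size still fits the chunk
// the allocator actually handed out; otherwise it falls back to
// allocate-copy-free. Bytes newly exposed by an in-place grow are poisoned as
// fresh heap memory when poison_in_malloc is set.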
static void *MsanReallocate(BufferedStackTrace *stack, void *old_p,
                            uptr new_size, uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}
static void *MsanCalloc(BufferedStackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}
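
// Helpers for the __sanitizer_get_allocated_* queries at the end of this
// file: they answer only for pointers that point at the start of a live
// allocation, returning null / 0 otherwise.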
static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  auto *b = reinterpret_cast<Metadata *>(allocator.GetMetaData(beg));
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;

  return beg;
}
static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

static uptr AllocationSize(const void *p) {
  if (!p)
    return 0;
  if (allocator.GetBlockBegin(p) != p)
    return 0;
  return AllocationSizeFast(p);
}
void *__msan::msan_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *__msan::msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}
void *__msan::msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}
void *__msan::msan_reallocarray(void *ptr, uptr nmemb, uptr size,
                                BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}
void *__msan::msan_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *__msan::msan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}
void *__msan::msan_aligned_alloc(uptr alignment, uptr size,
                                 BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *__msan::msan_memalign(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
int __msan::msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                                BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }