//===-- msan_allocator.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "msan_allocator.h"

#include "msan.h"
#include "msan_interface_internal.h"
#include "msan_origin.h"
#include "msan_poisoning.h"
#include "msan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"

namespace __msan {

// Per-chunk metadata stored by the allocator: the size the user asked for.
struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    uptr shadow_p = MEM_TO_SHADOW(p);
    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
    if (__msan_get_track_origins()) {
      uptr origin_p = MEM_TO_ORIGIN(p);
      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
    }
  }
};

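// Primary allocator parameters, selected per target architecture below. Each
// branch fixes the allocator's address-space region and size-class map, plus
// the largest single allocation MSan will service (kMaxAllowedMallocSize).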
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
#if SANITIZER_NETBSD || SANITIZER_LINUX
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__loongarch_lp64)
const uptr kAllocatorSpace = 0x700000000000ULL;
const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = kAllocatorSpace;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x300000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__s390x__)
static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x440000000000;
  static const uptr kSpaceSize = 0x020000000000;  // 2T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 8UL << 30;

struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0xE00000000000ULL;
  static const uptr kSpaceSize = 0x40000000000;  // 4T.
  static const uptr kMetadataSize = sizeof(Metadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef MsanMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;

static Allocator allocator;
// Cache and mutex used for allocations made before a thread's own
// MsanThreadLocalMallocStorage is available.
static AllocatorCache fallback_allocator_cache;
static StaticSpinMutex fallback_mutex;

static uptr max_malloc_size;

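// One-time allocator setup: honors allocator_may_return_null and the
// release-to-OS interval, and caps single allocations at
// max_allocation_size_mb (when set) or kMaxAllowedMallocSize.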
void MsanAllocatorInit() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
  if (common_flags()->max_allocation_size_mb)
    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
                          kMaxAllowedMallocSize);
  else
    max_malloc_size = kMaxAllowedMallocSize;
}

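// Returns the allocator cache embedded in a thread's
// MsanThreadLocalMallocStorage.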
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::Init() {
  allocator.InitCache(GetAllocatorCache(this));
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
  allocator.DestroyCache(GetAllocatorCache(this));
}

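// Common allocation path for all malloc-family entry points. Uses the current
// thread's cache when available, otherwise the global fallback cache under
// fallback_mutex. New memory is zeroed or poisoned according to `zeroise` and
// flags()->poison_in_malloc, and a heap origin is recorded when origin
// tracking is enabled.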
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > max_malloc_size) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
  }
  if (UNLIKELY(IsRssLimitExceeded())) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportRssLimitExceeded(stack);
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    if (allocator.FromPrimary(allocated))
      __msan_clear_and_unpoison(allocated, size);
    else
      __msan_unpoison(allocated, size);  // Mem is already zeroed.
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  RunMallocHooks(allocated, size);
  return allocated;
}

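// Releases p back to the allocator, optionally re-poisoning the memory and
// recording a deallocation origin first.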
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  RunFreeHooks(p);

  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned. The secondary allocator will unmap and unpoison by
  // MsanMapUnmapCallback, no need to poison it here.
  if (flags()->poison_in_free && allocator.FromPrimary(p)) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

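// Shrinks or grows in place when the existing block is already large enough;
// otherwise allocates a new block, copies the data (and its shadow/origin via
// CopyMemory), and frees the old block.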
static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                            uptr alignment) {
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, stack);
  }
  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}

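// Introspection helpers backing the __sanitizer_get_allocated_* interface
// below.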
static const void *AllocationBegin(const void *p) {
  if (!p)
    return nullptr;
  void *beg = allocator.GetBlockBegin(p);
  if (!beg)
    return nullptr;
  Metadata *b = (Metadata *)allocator.GetMetaData(beg);
  if (!b)
    return nullptr;
  if (b->requested_size == 0)
    return nullptr;

  return (const void *)beg;
}

static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

static uptr AllocationSizeFast(const void *p) {
  return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}

void *msan_malloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}

void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}

void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
  if (!ptr)
    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
  if (size == 0) {
    MsanDeallocate(stack, ptr);
    return nullptr;
  }
  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}

void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return msan_realloc(ptr, nmemb * size, stack);
}

void *msan_valloc(uptr size, StackTrace *stack) {
  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}

void *msan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}

void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}

int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = MsanAllocate(stack, size, alignment, false);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by MsanAllocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

}  // namespace __msan

using namespace __msan;

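// Allocator statistics and introspection entry points exported to
// instrumented programs.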
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

const void *__sanitizer_get_allocated_begin(const void *p) {
  return AllocationBegin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = AllocationSizeFast(p);
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
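
// Example (illustrative user code, not part of this file): the introspection
// entry points above are declared in <sanitizer/allocator_interface.h> and can
// be queried from an MSan-instrumented program, e.g.:
//   void *p = malloc(32);
//   if (__sanitizer_get_ownership(p))
//     size_t n = __sanitizer_get_allocated_size(p);  // n == 32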