//===-- memprof_allocator.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Implementation of MemProf's memory allocator, which uses the allocator
// from sanitizer_common.
//
//===----------------------------------------------------------------------===//

#include "memprof_allocator.h"
#include "memprof_mapping.h"
#include "memprof_mibmap.h"
#include "memprof_rawprofile.h"
#include "memprof_stack.h"
#include "memprof_thread.h"
#include "profile/MemProfData.inc"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include <sched.h>
#include <time.h>

namespace __memprof {

using ::llvm::memprof::MemInfoBlock;

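// Print emits one MIB either in terse or verbose form. From the format
// strings below, the terse line encodes, in order:
//   MIB:<id>/<alloc_count>/<avg_size>/<min_size>/<max_size>/
//       <avg_access_count>/<min_access_count>/<max_access_count>/
//       <avg_lifetime>/<min_lifetime>/<max_lifetime>/
//       <num_migrated_cpu>/<num_lifetime_overlaps>/
//       <num_same_alloc_cpu>/<num_same_dealloc_cpu>
// Averages are computed as (total * 100 / alloc_count) and printed with two
// decimal places via p / 100 and p % 100.
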
void Print(const MemInfoBlock &M, const u64 id, bool print_terse) {
  u64 p;

  if (print_terse) {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("MIB:%llu/%u/%llu.%02llu/%u/%u/", id, M.AllocCount, p / 100,
           p % 100, M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("%llu.%02llu/%llu/%llu/", p / 100, p % 100, M.MinAccessCount,
           M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("%llu.%02llu/%u/%u/", p / 100, p % 100, M.MinLifetime,
           M.MaxLifetime);
    Printf("%u/%u/%u/%u\n", M.NumMigratedCpu, M.NumLifetimeOverlaps,
           M.NumSameAllocCpu, M.NumSameDeallocCpu);
  } else {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("Memory allocation stack id = %llu\n", id);
    Printf("\talloc_count %u, size (ave/min/max) %llu.%02llu / %u / %u\n",
           M.AllocCount, p / 100, p % 100, M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("\taccess_count (ave/min/max): %llu.%02llu / %llu / %llu\n", p / 100,
           p % 100, M.MinAccessCount, M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("\tlifetime (ave/min/max): %llu.%02llu / %u / %u\n", p / 100,
           p % 100, M.MinLifetime, M.MaxLifetime);
    Printf("\tnum migrated: %u, num lifetime overlaps: %u, num same alloc "
           "cpu: %u, num same dealloc_cpu: %u\n",
           M.NumMigratedCpu, M.NumLifetimeOverlaps, M.NumSameAllocCpu,
           M.NumSameDeallocCpu);
  }
}

static int GetCpuId(void) {
  // _memprof_preinit is called via the preinit_array, which subsequently calls
  // malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
  // will seg fault as the address of __vdso_getcpu will be null.
  if (!memprof_inited)
    return -1;
  return sched_getcpu();
}

// Compute the timestamp in ms.
static int GetTimestamp(void) {
  // timespec_get will segfault if called from dl_init
  if (!memprof_timestamp_inited) {
    // By returning 0, this will be effectively treated as being
    // timestamped at memprof init time (when memprof_init_timestamp_s
    // is initialized).
    return 0;
  }
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (ts.tv_sec - memprof_init_timestamp_s) * 1000 + ts.tv_nsec / 1000000;
}

static MemprofAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// H H U U U U U U
//   H -- ChunkHeader (32 bytes)
//   U -- user memory.
//
// If there is left padding before the ChunkHeader (due to use of memalign),
// we store a magic value in the first uptr word of the memory block and
// store the address of ChunkHeader in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'

constexpr uptr kMaxAllowedMallocBits = 40;

// Should be no more than 32 bytes.
struct ChunkHeader {
  // 1-st 4 bytes.
  u32 alloc_context_id;
  // 2-nd 4 bytes
  u32 cpu_id;
  // 3-rd 4 bytes
  u32 timestamp_ms;
  // 4-th 4 bytes
  // Note only 1 bit is needed for this flag if we need space in the future for
  // more fields.
  u32 from_memalign;
  // 5-th and 6-th 4 bytes
  // The max size of an allocation is 2^40 (kMaxAllowedMallocSize), so this
  // could be shrunk to kMaxAllowedMallocBits if we need space in the future
  // for more fields.
  atomic_uint64_t user_requested_size;
  // 7-th and 8-th 4 bytes
  u64 data_type_id; // TODO: hash of type name
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
COMPILER_CHECK(kChunkHeaderSize == 32);

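// ChunkHeader size check above works out as:
//   4 (alloc_context_id) + 4 (cpu_id) + 4 (timestamp_ms) + 4 (from_memalign) +
//   8 (user_requested_size) + 8 (data_type_id) = 32 bytes.
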
struct MemprofChunk : ChunkHeader {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    return atomic_load(&user_requested_size, memory_order_relaxed);
  }
  void *AllocBeg() {
    if (from_memalign)
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void *>(this);
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  MemprofChunk *chunk_header;

public:
  MemprofChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(MemprofChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

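// LargeChunkHeader: Get() pairs an acquire load of `magic` with the release
// store in Set(p), so a reader that observes kAllocBegMagic also observes the
// chunk_header back-pointer written just before it. Set(nullptr) clears the
// magic with a compare-exchange so a stale header is never treated as live
// after deallocation.
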
void FlushUnneededMemProfShadowMemory(uptr p, uptr size) {
  // Since memprof's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}

void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededMemProfShadowMemory(p, size);
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

AllocatorCache *GetAllocatorCache(MemprofThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

// Accumulates the access count from the shadow for the given pointer and size.
u64 GetShadowCount(uptr p, u32 size) {
  u64 *shadow = (u64 *)MEM_TO_SHADOW(p);
  u64 *shadow_end = (u64 *)MEM_TO_SHADOW(p + size);
  u64 count = 0;
  for (; shadow <= shadow_end; shadow++)
    count += *shadow;
  return count;
}

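// GetShadowCount: each u64 shadow cell holds the access counter for one
// SHADOW_GRANULARITY-sized granule of user memory, so summing the cells over
// [MEM_TO_SHADOW(p), MEM_TO_SHADOW(p + size)] yields the total number of
// recorded accesses to the allocation.
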
// Clears the shadow counters (when memory is allocated).
void ClearShadow(uptr addr, uptr size) {
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  uptr shadow_beg = MEM_TO_SHADOW(addr);
  uptr shadow_end = MEM_TO_SHADOW(addr + size - SHADOW_GRANULARITY) + 1;
  if (shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
    REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
  } else {
    uptr page_size = GetPageSizeCached();
    uptr page_beg = RoundUpTo(shadow_beg, page_size);
    uptr page_end = RoundDownTo(shadow_end, page_size);

    if (page_beg >= page_end) {
      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
    } else {
      if (page_beg != shadow_beg) {
        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
      }
      if (page_end != shadow_end) {
        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
      }
      ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
    }
  }
}

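// ClearShadow: shadow ranges below clear_shadow_mmap_threshold are simply
// memset to zero. For larger ranges only the unaligned edges are memset; the
// page-aligned middle is remapped via ReserveShadowMemoryRange, which yields
// fresh zero pages instead of dirtying the existing ones.
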
struct Allocator {
  static const uptr kMaxAllowedMallocSize = 1ULL << kMaxAllowedMallocBits;

  MemprofAllocator allocator;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;

  uptr max_user_defined_malloc_size;

  // Holds the mapping of stack ids to MemInfoBlocks.
  MIBMapTy MIBMap;

  atomic_uint8_t destructing;
  atomic_uint8_t constructed;
  bool print_text;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized) : print_text(flags()->print_text) {
    atomic_store_relaxed(&destructing, 0);
    atomic_store_relaxed(&constructed, 1);
  }

  ~Allocator() {
    atomic_store_relaxed(&destructing, 1);
    FinishAndWrite();
  }

  static void PrintCallback(const uptr Key, LockedMemInfoBlock *const &Value,
                            void *Arg) {
    SpinMutexLock l(&Value->mutex);
    Print(Value->mib, Key, bool(Arg));
  }

  void FinishAndWrite() {
    if (print_text && common_flags()->print_module_map)
      DumpProcessMap();

    allocator.ForceLock();

    InsertLiveBlocks();
    if (print_text) {
      if (!flags()->print_terse)
        Printf("Recorded MIBs (incl. live on exit):\n");
      MIBMap.ForEach(PrintCallback,
                     reinterpret_cast<void *>(flags()->print_terse));
      StackDepotPrintAll();
    } else {
      // Serialize the contents to a raw profile. Format documented in
      // memprof_rawprofile.h.
      char *Buffer = nullptr;

      __sanitizer::ListOfModules List;
      List.init();
      ArrayRef<LoadedModule> Modules(List.begin(), List.end());
      u64 BytesSerialized = SerializeToRawProfile(MIBMap, Modules, Buffer);
      CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
      report_file.Write(Buffer, BytesSerialized);
    }

    allocator.ForceUnlock();
  }

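  // FinishAndWrite has two output modes: with print_text the recorded MIBs
  // (and the stack depot) are printed in verbose or terse text form, while the
  // default path serializes everything into the raw binary profile and writes
  // it to report_file.
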
  // Inserts any blocks which have been allocated but not yet deallocated.
  void InsertLiveBlocks() {
    allocator.ForEachChunk(
        [](uptr chunk, void *alloc) {
          u64 user_requested_size;
          Allocator *A = (Allocator *)alloc;
          MemprofChunk *m =
              A->GetMemprofChunk((void *)chunk, user_requested_size);
          if (!m)
            return;
          uptr user_beg = ((uptr)m) + kChunkHeaderSize;
          u64 c = GetShadowCount(user_beg, user_requested_size);
          long curtime = GetTimestamp();
          MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                              m->cpu_id, GetCpuId());
          InsertOrMerge(m->alloc_context_id, newMIB, A->MIBMap);
        },
        this);
  }

  void InitLinkerInitialized() {
    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    allocator.InitLinkerInitialized(
        common_flags()->allocator_release_to_os_interval_ms);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type) {
    if (UNLIKELY(!memprof_inited))
      MemprofInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }

    const uptr min_alignment = MEMPROF_ALIGNMENT;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rounded_size = RoundUpTo(size, alignment);
    uptr needed_size = rounded_size + kChunkHeaderSize;
    if (alignment > min_alignment)
      needed_size += alignment;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n", size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, malloc_limit, stack);
    }

    MemprofThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_header = alloc_beg + kChunkHeaderSize;
    uptr user_beg = beg_plus_header;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
    m->from_memalign = alloc_beg != chunk_beg;

    m->cpu_id = GetCpuId();
    m->timestamp_ms = GetTimestamp();
    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    if (size_rounded_down_to_granularity)
      ClearShadow(user_beg, size_rounded_down_to_granularity);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_overhead += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    atomic_store(&m->user_requested_size, size, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }

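  // Allocate only writes the LargeChunkHeader magic/back-pointer pair when
  // left padding was introduced (alloc_beg != chunk_beg); in the common case
  // the ChunkHeader sits directly at alloc_beg and user_beg == alloc_beg +
  // kChunkHeaderSize. from_memalign records the same condition so AllocBeg()
  // can recover the block start on deallocation.
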
  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0)
      return;

    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    u64 user_requested_size =
        atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
    if (memprof_inited && atomic_load_relaxed(&constructed) &&
        !atomic_load_relaxed(&destructing)) {
      u64 c = GetShadowCount(p, user_requested_size);
      long curtime = GetTimestamp();

      MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                          m->cpu_id, GetCpuId());
      InsertOrMerge(m->alloc_context_id, newMIB, MIBMap);
    }

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += user_requested_size;

    void *alloc_beg = m->AllocBeg();
    if (alloc_beg != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetMemprofChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(nullptr);
    }

    MemprofThread *t = GetCurrentThread();
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocator.Deallocate(cache, alloc_beg);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocator.Deallocate(cache, alloc_beg);
    }
  }

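  // Deallocate: the atomic_exchange above doubles as the liveness marker. Once
  // user_requested_size is zeroed, GetMemprofChunk and InsertLiveBlocks treat
  // the chunk as freed, so each allocation is recorded in MIBMap exactly once,
  // either here or, if still live at exit, by InsertLiveBlocks.
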
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
    if (new_ptr) {
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
    // If the memory comes from the secondary allocator there is no need to
    // clear it, as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void CommitBack(MemprofThreadLocalMallocStorage *ms,
                  BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  MemprofChunk *GetMemprofChunk(void *alloc_beg, u64 &user_requested_size) {
    if (!alloc_beg)
      return nullptr;
    MemprofChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<MemprofChunk *>(alloc_beg);
    }
    // The size is reset to 0 on deallocation (and a min of 1 on
    // allocation).
    user_requested_size =
        atomic_load(&p->user_requested_size, memory_order_acquire);
    if (user_requested_size)
      return p;
    return nullptr;
  }

  MemprofChunk *GetMemprofChunkByAddr(uptr p, u64 &user_requested_size) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetMemprofChunk(alloc_beg, user_requested_size);
  }

  uptr AllocationSize(uptr p) {
    u64 user_requested_size;
    MemprofChunk *m = GetMemprofChunkByAddr(p, user_requested_size);
    if (!m)
      return 0;
    if (m->Beg() != p)
      return 0;
    return user_requested_size;
  }

  uptr AllocationSizeFast(uptr p) {
    return reinterpret_cast<MemprofChunk *>(p - kChunkHeaderSize)->UsedSize();
  }

  void Purge(BufferedStackTrace *stack) { allocator.ForceReleaseToOS(); }

  void PrintStats() { allocator.PrintStats(); }

  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static MemprofAllocator &get_allocator() { return instance.allocator; }

void InitializeAllocator() { instance.InitLinkerInitialized(); }

void MemprofThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() { instance.PrintStats(); }

void memprof_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void memprof_delete(void *ptr, uptr size, uptr alignment,
                    BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *memprof_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
}

void *memprof_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *memprof_reallocarray(void *p, uptr nmemb, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return memprof_realloc(p, nmemb * size, stack);
}

void *memprof_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *memprof_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC));
}

void *memprof_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(instance.Allocate(size, PageSize, stack, FROM_MALLOC));
}

void *memprof_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, alloc_type));
}

void *memprof_aligned_alloc(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, FROM_MALLOC));
}

int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

static const void *memprof_malloc_begin(const void *p) {
  u64 user_requested_size;
  MemprofChunk *m =
      instance.GetMemprofChunkByAddr((uptr)p, user_requested_size);
  if (!m)
    return nullptr;
  if (user_requested_size == 0)
    return nullptr;

  return (const void *)m->Beg();
}

uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr)
    return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  return usable_size;
}

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return memprof_malloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0);
}

uptr __sanitizer_get_allocated_size_fast(const void *p) {
  DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
  uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
  DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
  return ret;
}

int __memprof_profile_dump() {
  instance.FinishAndWrite();
  // In the future we may want to return non-zero if there are any errors
  // detected during the dumping process.
  return 0;
}