//===-- memprof_allocator.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Implementation of MemProf's memory allocator, which uses the allocator
// from sanitizer_common.
//
//===----------------------------------------------------------------------===//
#include "memprof_allocator.h"
#include "memprof_mapping.h"
#include "memprof_mibmap.h"
#include "memprof_rawprofile.h"
#include "memprof_stack.h"
#include "memprof_thread.h"
#include "profile/MemProfData.inc"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

#include <sched.h>
#include <time.h>

namespace __memprof {

using ::llvm::memprof::MemInfoBlock;

void Print(const MemInfoBlock &M, const u64 id, bool print_terse) {
  u64 p;

  if (print_terse) {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("MIB:%llu/%u/%llu.%02llu/%u/%u/", id, M.AllocCount, p / 100, p % 100,
           M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("%llu.%02llu/%llu/%llu/", p / 100, p % 100, M.MinAccessCount,
           M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("%llu.%02llu/%u/%u/", p / 100, p % 100, M.MinLifetime,
           M.MaxLifetime);
    Printf("%u/%u/%u/%u\n", M.NumMigratedCpu, M.NumLifetimeOverlaps,
           M.NumSameAllocCpu, M.NumSameDeallocCpu);
  } else {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("Memory allocation stack id = %llu\n", id);
    Printf("\talloc_count %u, size (ave/min/max) %llu.%02llu / %u / %u\n",
           M.AllocCount, p / 100, p % 100, M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("\taccess_count (ave/min/max): %llu.%02llu / %llu / %llu\n", p / 100,
           p % 100, M.MinAccessCount, M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("\tlifetime (ave/min/max): %llu.%02llu / %u / %u\n", p / 100,
           p % 100, M.MinLifetime, M.MaxLifetime);
    Printf("\tnum migrated: %u, num lifetime overlaps: %u, num same alloc "
           "cpu: %u, num same dealloc_cpu: %u\n",
           M.NumMigratedCpu, M.NumLifetimeOverlaps, M.NumSameAllocCpu,
           M.NumSameDeallocCpu);
  }
}

static int GetCpuId(void) {
  // _memprof_preinit is called via the preinit_array, which subsequently calls
  // malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
  // will seg fault as the address of __vdso_getcpu will be null.
  if (!memprof_inited)
    return -1;
  return sched_getcpu();
}

// Compute the timestamp in ms.
static int GetTimestamp(void) {
  // timespec_get will segfault if called from dl_init
  if (!memprof_timestamp_inited) {
    // By returning 0, this will be effectively treated as being
    // timestamped at memprof init time (when memprof_init_timestamp_s
    // is initialized).
    return 0;
  }
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (ts.tv_sec - memprof_init_timestamp_s) * 1000 + ts.tv_nsec / 1000000;
}

static MemprofAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// H H U U U U U U
//   H -- ChunkHeader (32 bytes)
//   U -- user memory.

// If there is left padding before the ChunkHeader (due to use of memalign),
// we store a magic value in the first uptr word of the memory block and
// store the address of ChunkHeader in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'

constexpr uptr kMaxAllowedMallocBits = 40;

// Should be no more than 32-bytes
struct ChunkHeader {
  u32 alloc_context_id;
  u32 cpu_id;
  u32 timestamp_ms;
  // Note only 1 bit is needed for this flag if we need space in the future for
  // more fields.
  u32 from_memalign;
  // 5-th and 6-th 4 bytes
  // The max size of an allocation is 2^40 (kMaxAllowedMallocSize), so this
  // could be shrunk to kMaxAllowedMallocBits if we need space in the future for
  // more fields.
  atomic_uint64_t user_requested_size;
  // 7-th and 8-th 4 bytes
  u64 data_type_id; // TODO: hash of type name
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
COMPILER_CHECK(kChunkHeaderSize == 32);

struct MemprofChunk : ChunkHeader {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    return atomic_load(&user_requested_size, memory_order_relaxed);
  }
  void *AllocBeg() {
    if (from_memalign)
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void *>(this);
  }
};

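// Note: as described in the layout diagram above, when left padding is present
// (the memalign case) a LargeChunkHeader at the very start of the underlying
// allocation stores the kAllocBegMagic marker plus a back-pointer to the real
// ChunkHeader, so the chunk can still be located from the allocation begin.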
class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  MemprofChunk *chunk_header;

public:
  MemprofChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }
  void Set(MemprofChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }
    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

void FlushUnneededMemProfShadowMemory(uptr p, uptr size) {
  // Since memprof's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}

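// The allocator map/unmap callbacks below keep the per-thread mmap statistics
// up to date and release the shadow that backs unmapped user memory.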
void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}

void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededMemProfShadowMemory(p, size);
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

AllocatorCache *GetAllocatorCache(MemprofThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

// Accumulates the access count from the shadow for the given pointer and size.
u64 GetShadowCount(uptr p, u32 size) {
  u64 *shadow = (u64 *)MEM_TO_SHADOW(p);
  u64 *shadow_end = (u64 *)MEM_TO_SHADOW(p + size);
  u64 count = 0;
  for (; shadow <= shadow_end; shadow++)
    count += *shadow;
  return count;
}

// Clears the shadow counters (when memory is allocated).
void ClearShadow(uptr addr, uptr size) {
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  uptr shadow_beg = MEM_TO_SHADOW(addr);
  uptr shadow_end = MEM_TO_SHADOW(addr + size - SHADOW_GRANULARITY) + 1;
  if (shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
    REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
  } else {
    uptr page_size = GetPageSizeCached();
    uptr page_beg = RoundUpTo(shadow_beg, page_size);
    uptr page_end = RoundDownTo(shadow_end, page_size);

    if (page_beg >= page_end) {
      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
    } else {
      if (page_beg != shadow_beg) {
        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
      }
      if (page_end != shadow_end) {
        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
      }
      ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
    }
  }
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize = 1ULL << kMaxAllowedMallocBits;
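  // With kMaxAllowedMallocBits == 40 this caps a single allocation at 2^40
  // bytes (1 TiB).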

  MemprofAllocator allocator;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;

  uptr max_user_defined_malloc_size;

  // Holds the mapping of stack ids to MemInfoBlocks.
  MIBMapTy MIBMap;

  atomic_uint8_t destructing;
  atomic_uint8_t constructed;
  bool print_text;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized) : print_text(flags()->print_text) {
    atomic_store_relaxed(&destructing, 0);
    atomic_store_relaxed(&constructed, 1);
  }

  ~Allocator() {
    atomic_store_relaxed(&destructing, 1);
    FinishAndWrite();
  }

  static void PrintCallback(const uptr Key, LockedMemInfoBlock *const &Value,
                            void *Arg) {
    SpinMutexLock l(&Value->mutex);
    Print(Value->mib, Key, bool(Arg));
  }

  void FinishAndWrite() {
    if (print_text && common_flags()->print_module_map)
      DumpProcessMap();

    allocator.ForceLock();

    InsertLiveBlocks();
    if (print_text) {
      if (!flags()->print_terse)
        Printf("Recorded MIBs (incl. live on exit):\n");
      MIBMap.ForEach(PrintCallback,
                     reinterpret_cast<void *>(flags()->print_terse));
      StackDepotPrintAll();
    } else {
      // Serialize the contents to a raw profile. Format documented in
      // memprof_rawprofile.h.
      char *Buffer = nullptr;

      __sanitizer::ListOfModules List;
      List.init();
      ArrayRef<LoadedModule> Modules(List.begin(), List.end());
      u64 BytesSerialized = SerializeToRawProfile(MIBMap, Modules, Buffer);
      CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
      report_file.Write(Buffer, BytesSerialized);
    }

    allocator.ForceUnlock();
  }

  // Inserts any blocks which have been allocated but not yet deallocated.
  void InsertLiveBlocks() {
    allocator.ForEachChunk(
        [](uptr chunk, void *alloc) {
          u64 user_requested_size;
          Allocator *A = (Allocator *)alloc;
          MemprofChunk *m =
              A->GetMemprofChunk((void *)chunk, user_requested_size);
          if (!m)
            return;
          uptr user_beg = ((uptr)m) + kChunkHeaderSize;
          u64 c = GetShadowCount(user_beg, user_requested_size);
          long curtime = GetTimestamp();
          MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                              m->cpu_id, GetCpuId());
          InsertOrMerge(m->alloc_context_id, newMIB, A->MIBMap);
        },
        this);
  }

  void InitLinkerInitialized() {
    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    allocator.InitLinkerInitialized(
        common_flags()->allocator_release_to_os_interval_ms);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  // -------------------- Allocation/Deallocation routines ---------------
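  // Allocate() places the 32-byte ChunkHeader immediately in front of the
  // user region, stamps it with the CPU id, timestamp and stack depot id of
  // the allocation, and zeroes the shadow counters for the granule-aligned
  // portion of the request.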
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type) {
    if (UNLIKELY(!memprof_inited))
      MemprofInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }

    const uptr min_alignment = MEMPROF_ALIGNMENT;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rounded_size = RoundUpTo(size, alignment);
    uptr needed_size = rounded_size + kChunkHeaderSize;
    if (alignment > min_alignment)
      needed_size += alignment;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n", size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, malloc_limit, stack);
    }

    MemprofThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_header = alloc_beg + kChunkHeaderSize;
    uptr user_beg = beg_plus_header;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
    m->from_memalign = alloc_beg != chunk_beg;

    m->cpu_id = GetCpuId();
    m->timestamp_ms = GetTimestamp();
    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    if (size_rounded_down_to_granularity)
      ClearShadow(user_beg, size_rounded_down_to_granularity);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_overhead += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    atomic_store(&m->user_requested_size, size, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }

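  // Deallocate() is where a chunk's profile data is recorded: the access
  // counts accumulated in shadow and the lifetime derived from the header
  // timestamp are folded into the MemInfoBlock keyed by the allocation
  // context id.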
  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0)
      return;

    RunFreeHooks(ptr);

    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    u64 user_requested_size =
        atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
    if (memprof_inited && atomic_load_relaxed(&constructed) &&
        !atomic_load_relaxed(&destructing)) {
      u64 c = GetShadowCount(p, user_requested_size);
      long curtime = GetTimestamp();

      MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                          m->cpu_id, GetCpuId());
      InsertOrMerge(m->alloc_context_id, newMIB, MIBMap);
    }

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += user_requested_size;

    void *alloc_beg = m->AllocBeg();
    if (alloc_beg != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetMemprofChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(nullptr);
    }

    MemprofThread *t = GetCurrentThread();
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocator.Deallocate(cache, alloc_beg);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocator.Deallocate(cache, alloc_beg);
    }
  }

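  // Reallocate() is implemented as Allocate + copy + Deallocate, so the old
  // chunk is recorded in the profile just like an explicit free.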
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
    if (new_ptr) {
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void CommitBack(MemprofThreadLocalMallocStorage *ms,
                  BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  MemprofChunk *GetMemprofChunk(void *alloc_beg, u64 &user_requested_size) {
    if (!alloc_beg)
      return nullptr;
    MemprofChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<MemprofChunk *>(alloc_beg);
    }
    // The size is reset to 0 on deallocation (and a min of 1 on
    // allocation).
    user_requested_size =
        atomic_load(&p->user_requested_size, memory_order_acquire);
    if (user_requested_size)
      return p;
    return nullptr;
  }

  MemprofChunk *GetMemprofChunkByAddr(uptr p, u64 &user_requested_size) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetMemprofChunk(alloc_beg, user_requested_size);
  }

  uptr AllocationSize(uptr p) {
    u64 user_requested_size;
    MemprofChunk *m = GetMemprofChunkByAddr(p, user_requested_size);
    if (!m || m->Beg() != p)
      return 0;
    return user_requested_size;
  }

  void Purge(BufferedStackTrace *stack) { allocator.ForceReleaseToOS(); }

  void PrintStats() { allocator.PrintStats(); }

  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static MemprofAllocator &get_allocator() { return instance.allocator; }

void InitializeAllocator() { instance.InitLinkerInitialized(); }

void MemprofThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() { instance.PrintStats(); }

void memprof_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void memprof_delete(void *ptr, uptr size, uptr alignment,
                    BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *memprof_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
}

void *memprof_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *memprof_reallocarray(void *p, uptr nmemb, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return memprof_realloc(p, nmemb * size, stack);
}

void *memprof_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *memprof_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC));
}

void *memprof_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(instance.Allocate(size, PageSize, stack, FROM_MALLOC));
}

void *memprof_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, alloc_type));
}

void *memprof_aligned_alloc(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, FROM_MALLOC));
}

int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

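// Returns the user-visible start address of the live allocation containing p,
// or null if p does not belong to a live MemProf chunk.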
static const void *memprof_malloc_begin(const void *p) {
  u64 user_requested_size;
  MemprofChunk *m =
      instance.GetMemprofChunkByAddr((uptr)p, user_requested_size);
  if (!m || user_requested_size == 0)
    return nullptr;
  return (const void *)m->Beg();
}

uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr)
    return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  return usable_size;
}

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

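// The __sanitizer_* entry points below expose allocation introspection
// (ownership, allocation begin, and usable size) on top of the MemProf chunk
// metadata.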
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0) != 0;
}

const void *__sanitizer_get_allocated_begin(const void *p) {
  return memprof_malloc_begin(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0);
}

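// Flushes the recorded profile (text or raw format, depending on flags) via
// Allocator::FinishAndWrite.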
int __memprof_profile_dump() {
  instance.FinishAndWrite();
  // In the future we may want to return non-zero if there are any errors
  // detected during the dumping process.
  return 0;
}