//===-- memprof_stats.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Code related to statistics collected by MemProfiler.
//===----------------------------------------------------------------------===//
#include "memprof_stats.h"
#include "memprof_interceptors.h"
#include "memprof_internal.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __memprof {

MemprofStats::MemprofStats() { Clear(); }

void MemprofStats::Clear() {
  if (REAL(memset))
    return (void)REAL(memset)(this, 0, sizeof(MemprofStats));
  internal_memset(this, 0, sizeof(MemprofStats));
}

static void PrintMallocStatsArray(const char *prefix,
                                  uptr (&array)[kNumberOfSizeClasses]) {
  Printf("%s", prefix);
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    if (!array[i])
      continue;
    Printf("%zu:%zu; ", i, array[i]);
  }
  Printf("\n");
}

void MemprofStats::Print() {
  Printf("Stats: %zuM malloced (%zuM for overhead) by %zu calls\n",
         malloced >> 20, malloced_overhead >> 20, mallocs);
  Printf("Stats: %zuM realloced by %zu calls\n", realloced >> 20, reallocs);
  Printf("Stats: %zuM freed by %zu calls\n", freed >> 20, frees);
  Printf("Stats: %zuM really freed by %zu calls\n", really_freed >> 20,
         real_frees);
  Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
         (mmaped - munmaped) >> 20, mmaped >> 20, munmaped >> 20, mmaps,
         munmaps);

  PrintMallocStatsArray("  mallocs by size class: ", malloced_by_size);
  Printf("Stats: malloc large: %zu\n", malloc_large);
}

void MemprofStats::MergeFrom(const MemprofStats *stats) {
  uptr *dst_ptr = reinterpret_cast<uptr *>(this);
  const uptr *src_ptr = reinterpret_cast<const uptr *>(stats);
  uptr num_fields = sizeof(*this) / sizeof(uptr);
  for (uptr i = 0; i < num_fields; i++)
    dst_ptr[i] += src_ptr[i];
}

static Mutex print_lock;

static MemprofStats unknown_thread_stats(LINKER_INITIALIZED);
static MemprofStats dead_threads_stats(LINKER_INITIALIZED);
static Mutex dead_threads_stats_lock;
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread MemprofStats.
static uptr max_malloced_memory;

static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) {
  MemprofStats *accumulated_stats = reinterpret_cast<MemprofStats *>(arg);
  MemprofThreadContext *tctx = static_cast<MemprofThreadContext *>(tctx_base);
  if (MemprofThread *t = tctx->thread)
    accumulated_stats->MergeFrom(&t->stats());
}

static void GetAccumulatedStats(MemprofStats *stats) {
  stats->Clear();
  {
    ThreadRegistryLock l(&memprofThreadRegistry());
    memprofThreadRegistry().RunCallbackForEachThreadLocked(MergeThreadStats,
                                                           stats);
  }
  stats->MergeFrom(&unknown_thread_stats);
  {
    Lock lock(&dead_threads_stats_lock);
    stats->MergeFrom(&dead_threads_stats);
  }
  // This is not very accurate: we may miss allocation peaks that happen
  // between two updates of accumulated_stats_. For more accurate bookkeeping
  // the maximum should be updated on every malloc(), which is unacceptable.
  if (max_malloced_memory < stats->malloced) {
    max_malloced_memory = stats->malloced;
  }
}

void FlushToDeadThreadStats(MemprofStats *stats) {
  Lock lock(&dead_threads_stats_lock);
  dead_threads_stats.MergeFrom(stats);
  stats->Clear();
}

MemprofStats &GetCurrentThreadStats() {
  MemprofThread *t = GetCurrentThread();
  return (t) ? t->stats() : unknown_thread_stats;
}

static void PrintAccumulatedStats() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  // Use lock to keep reports from mixing up.
  Lock lock(&print_lock);
  stats.Print();
  StackDepotStats stack_depot_stats = StackDepotGetStats();
  Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
         stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
  PrintInternalAllocatorStats();
}

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

uptr __sanitizer_get_current_allocated_bytes() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  uptr malloced = stats.malloced;
  uptr freed = stats.freed;
  // Return sane value if malloced < freed due to the racy way we update
  // accumulated stats.
  return (malloced > freed) ? malloced - freed : 1;
}

uptr __sanitizer_get_heap_size() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  return stats.mmaped - stats.munmaped;
}

uptr __sanitizer_get_free_bytes() {
  MemprofStats stats;
  GetAccumulatedStats(&stats);
  uptr total_free = stats.mmaped - stats.munmaped + stats.really_freed;
  uptr total_used = stats.malloced;
  // Return sane value if total_free < total_used due to the racy way we
  // update accumulated stats.
  return (total_free > total_used) ? total_free - total_used : 1;
}

uptr __sanitizer_get_unmapped_bytes() { return 0; }

void __memprof_print_accumulated_stats() { PrintAccumulatedStats(); }