// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <windows.h>
#include <tlhelp32.h>  // for CreateToolhelp32Snapshot()

#include "tools/memory_watcher/memory_watcher.h"
#include "base/file_util.h"
#include "base/logging.h"
#include "base/metrics/stats_counters.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/lock.h"
#include "tools/memory_watcher/call_stack.h"
#include "tools/memory_watcher/preamble_patcher.h"
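
// Running tallies of tracked heap usage, updated for every allocation
// and free that the watcher observes.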
static base::StatsCounter mem_in_use("MemoryInUse.Bytes");
static base::StatsCounter mem_in_use_blocks("MemoryInUse.Blocks");
static base::StatsCounter mem_in_use_allocs("MemoryInUse.Allocs");
static base::StatsCounter mem_in_use_frees("MemoryInUse.Frees");

// ---------------------------------------------------------------------

MemoryWatcher::MemoryWatcher()
    : file_(NULL),
      active_thread_id_(0) {
  MemoryHook::Initialize();
  CallStack::Initialize();

  block_map_ = new CallStackMap();

  // Register last - only after we're ready for notifications!
  Hook();
}

MemoryWatcher::~MemoryWatcher() {
  Unhook();

  // Pointers in the block_map are part of the MemoryHook heap.  Be sure
  // to delete the map before closing the heap.
  delete block_map_;
}
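
// Hook and Unhook register and unregister this watcher with MemoryHook,
// which reports each subsequent allocation and free back through OnTrack
// and OnUntrack.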
void MemoryWatcher::Hook() {
  MemoryHook::RegisterWatcher(this);
}

void MemoryWatcher::Unhook() {
  MemoryHook::UnregisterWatcher(this);
}
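
// The leak report goes to a per-process log file whose name is built from
// "memwatcher", the optional log name, and the process id.  The report is
// written under a temporary name and renamed to the final name when closed.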
void MemoryWatcher::OpenLogFile() {
  DCHECK(file_ == NULL);
  file_name_ = "memwatcher";
  if (!log_name_.empty()) {
    file_name_ += log_name_;
  }
  char buf[16];
  file_name_ += _itoa(GetCurrentProcessId(), buf, 10);

  std::string tmp_name(file_name_);
  tmp_name += ".tmp";
  file_ = fopen(tmp_name.c_str(), "w+");
}

void MemoryWatcher::CloseLogFile() {
  if (file_ != NULL) {
    fclose(file_);
    file_ = NULL;
    std::wstring tmp_name = ASCIIToWide(file_name_);
    tmp_name += L".tmp";
    base::Move(base::FilePath(tmp_name),
               base::FilePath(ASCIIToWide(file_name_)));
  }
}
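
// Returns true when the current thread is already inside the watcher with
// block_map_lock_ held; nested notifications triggered by our own
// bookkeeping are dropped so the non-reentrant lock is never reacquired.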
bool MemoryWatcher::LockedRecursionDetected() const {
  if (!active_thread_id_) return false;
  DWORD thread_id = GetCurrentThreadId();
  // TODO(jar): Perchance we should use atomic access to member.
  return thread_id == active_thread_id_;
}
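
// Notification from MemoryHook for each tracked allocation: captures the
// allocating call stack and records it in block_map_, keyed by the block id.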
void MemoryWatcher::OnTrack(HANDLE heap, int32 id, int32 size) {
  // Don't track zeroes.  It's a waste of time.
  if (size == 0)
    return;

  if (LockedRecursionDetected())
    return;

  // AllocationStack overrides new/delete to not allocate
  // from the main heap.
  AllocationStack* stack = new AllocationStack(size);
  if (!stack->Valid()) return;  // Recursion blocked generation of stack.

  {
    base::AutoLock lock(block_map_lock_);

    // Ideally, we'd like to verify that the block being added
    // here is not already in our list of tracked blocks.  However,
    // the lookup in our hash table is expensive and slows us too
    // much.
    CallStackMap::iterator block_it = block_map_->find(id);
    if (block_it != block_map_->end()) {
#if 0  // Don't do this until stack->ToString() uses ONLY our heap.
      active_thread_id_ = GetCurrentThreadId();
      PrivateAllocatorString output;
      block_it->second->ToString(&output);
      // VLOG(1) << "First Stack size " << stack->size() << "was\n" << output;
      stack->ToString(&output);
      // VLOG(1) << "Second Stack size " << stack->size() << "was\n" << output;
#endif  // 0
      // TODO(jar): We should delete one stack, and keep the other, perhaps.
      // For now, just delete the first, and keep the second?
      delete block_it->second;
    }
    // TODO(jar): Perchance we should use atomic access to member.
    active_thread_id_ = 0;  // Note: Only do this AFTER exiting above scope!

    (*block_map_)[id] = stack;
  }

  mem_in_use.Add(size);
  mem_in_use_blocks.Increment();
  mem_in_use_allocs.Increment();
}
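
// Notification from MemoryHook for each tracked free: drops the saved call
// stack for the block and updates the tallies.  Frees of blocks allocated
// before the hooks were installed are normal; their size is ignored.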
void MemoryWatcher::OnUntrack(HANDLE heap, int32 id, int32 size) {
  // Don't bother with these.
  if (size == 0)
    return;

  if (LockedRecursionDetected())
    return;

  {
    base::AutoLock lock(block_map_lock_);
    active_thread_id_ = GetCurrentThreadId();

    // First, find the block in our block_map.
    CallStackMap::iterator it = block_map_->find(id);
    if (it != block_map_->end()) {
      AllocationStack* stack = it->second;
      DCHECK(stack->size() == size);
      block_map_->erase(id);
      delete stack;
    } else {
      // Untracked item.  This happens a fair amount, and it is
      // normal.  A lot of time elapses during process startup
      // before the allocation routines are hooked.
      size = 0;  // Ignore size in tallies.
    }
    // TODO(jar): Perchance we should use atomic access to member.
    active_thread_id_ = 0;
  }

  mem_in_use.Add(-size);
  mem_in_use_blocks.Decrement();
  mem_in_use_frees.Increment();
}

void MemoryWatcher::SetLogName(char* log_name) {
  if (!log_name)
    return;

  log_name_ = log_name;
}

// Help sort lists of stacks based on allocation cost.
// Note: Sort based on allocation count is interesting too!
static bool CompareCallStackIdItems(MemoryWatcher::StackTrack* left,
                                    MemoryWatcher::StackTrack* right) {
  return left->size > right->size;
}
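
// Writes the leak report: one line per unique allocation call stack with its
// outstanding byte and block counts, sorted by bytes, each followed by the
// stack text, and ending with overall totals.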
void MemoryWatcher::DumpLeaks() {
  // We can only dump the leaks once.  We'll cleanup the hooks here.
  Unhook();

  base::AutoLock lock(block_map_lock_);
  active_thread_id_ = GetCurrentThreadId();

  OpenLogFile();

  // Aggregate contributions from each allocated block on per-stack basis.
  CallStackIdMap stack_map;
  for (CallStackMap::iterator block_it = block_map_->begin();
       block_it != block_map_->end(); ++block_it) {
    AllocationStack* stack = block_it->second;
    int32 stack_hash = stack->hash();
    int32 alloc_block_size = stack->size();
    CallStackIdMap::iterator it = stack_map.find(stack_hash);
    if (it == stack_map.end()) {
      StackTrack tracker;
      tracker.count = 1;
      tracker.size = alloc_block_size;
      tracker.stack = stack;  // Temporary pointer into block_map_.
      stack_map[stack_hash] = tracker;
    } else {
      it->second.count++;
      it->second.size += alloc_block_size;
    }
  }
  // Don't release lock yet, as block_map_ is still pointed into.

  // Put references to StackTracks into array for sorting.
  std::vector<StackTrack*, PrivateHookAllocator<int32> >
      stack_tracks(stack_map.size());
  CallStackIdMap::iterator it = stack_map.begin();
  for (size_t i = 0; i < stack_tracks.size(); ++i) {
    stack_tracks[i] = &(it->second);
    ++it;
  }
  sort(stack_tracks.begin(), stack_tracks.end(), CompareCallStackIdItems);

  int32 total_bytes = 0;
  int32 total_blocks = 0;
  for (size_t i = 0; i < stack_tracks.size(); ++i) {
    StackTrack* stack_track = stack_tracks[i];
    fprintf(file_, "%d bytes, %d allocs, #%d\n",
            stack_track->size, stack_track->count, static_cast<int>(i));
    total_bytes += stack_track->size;
    total_blocks += stack_track->count;

    CallStack* stack = stack_track->stack;
    PrivateAllocatorString output;
    stack->ToString(&output);
    fprintf(file_, "%s", output.c_str());
  }
  fprintf(file_, "Total Leaks: %d\n", total_blocks);
  fprintf(file_, "Total Stacks: %d\n", static_cast<int>(stack_tracks.size()));
  fprintf(file_, "Total Bytes: %d\n", total_bytes);
  CloseLogFile();
}