Don't show supervised user as "already on this device" while they're being imported.
[chromium-blink-merge.git] / base / trace_event / trace_event_memory.cc
blob89595890aec54030196028121956e501b0536662
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/trace_event/trace_event_memory.h"
7 #include "base/debug/leak_annotations.h"
8 #include "base/lazy_instance.h"
9 #include "base/location.h"
10 #include "base/logging.h"
11 #include "base/memory/scoped_ptr.h"
12 #include "base/single_thread_task_runner.h"
13 #include "base/strings/string_number_conversions.h"
14 #include "base/strings/string_util.h"
15 #include "base/threading/thread_local_storage.h"
16 #include "base/trace_event/trace_event.h"
18 namespace base {
19 namespace trace_event {
21 namespace {
// Maximum number of nested TRACE_EVENT scopes to record. Must be less than
// or equal to HeapProfileTable::kMaxStackDepth / 2 because we record two
// entries on the pseudo-stack per scope (category and name).
const size_t kMaxScopeDepth = 16;
/////////////////////////////////////////////////////////////////////////////
// Holds a memory dump until the tracing system needs to serialize it.
// Instances are handed to the trace system inside a scoped_refptr, so the
// private destructor runs when the last reference is dropped.
class MemoryDumpHolder : public base::trace_event::ConvertableToTraceFormat {
 public:
  // Takes ownership of dump, which must be a JSON string, allocated with
  // malloc() and NULL terminated.
  explicit MemoryDumpHolder(char* dump) : dump_(dump) {}

  // base::trace_event::ConvertableToTraceFormat overrides:
  void AppendAsTraceFormat(std::string* out) const override {
    AppendHeapProfileAsTraceFormat(dump_, out);
  }

 private:
  // Private: lifetime is managed by reference counting.
  ~MemoryDumpHolder() override { free(dump_); }

  char* dump_;  // Owned; released with free() to match the malloc() above.

  DISALLOW_COPY_AND_ASSIGN(MemoryDumpHolder);
};
49 /////////////////////////////////////////////////////////////////////////////
50 // Records a stack of TRACE_MEMORY events. One per thread is required.
51 struct TraceMemoryStack {
52 TraceMemoryStack() : scope_depth(0) {
53 memset(scope_data, 0, kMaxScopeDepth * sizeof(scope_data[0]));
56 // Depth of the currently nested TRACE_EVENT scopes. Allowed to be greater
57 // than kMaxScopeDepth so we can match scope pushes and pops even if we don't
58 // have enough space to store the EventData.
59 size_t scope_depth;
61 // Stack of categories and names.
62 ScopedTraceMemory::ScopeData scope_data[kMaxScopeDepth];
// Pointer to a TraceMemoryStack per thread. The slot's destructor callback
// (DeleteStackOnThreadCleanup) frees each thread's stack on thread exit.
base::ThreadLocalStorage::StaticSlot tls_trace_memory_stack = TLS_INITIALIZER;
68 // Clean up memory pointed to by our thread-local storage.
69 void DeleteStackOnThreadCleanup(void* value) {
70 TraceMemoryStack* stack = static_cast<TraceMemoryStack*>(value);
71 delete stack;
74 // Initializes the thread-local TraceMemoryStack pointer.
75 void InitThreadLocalStorage() {
76 if (tls_trace_memory_stack.initialized())
77 return;
78 // Initialize the thread-local storage key.
79 tls_trace_memory_stack.Initialize(&DeleteStackOnThreadCleanup);
82 // Clean up thread-local-storage in the main thread.
83 void CleanupThreadLocalStorage() {
84 if (!tls_trace_memory_stack.initialized())
85 return;
86 TraceMemoryStack* stack =
87 static_cast<TraceMemoryStack*>(tls_trace_memory_stack.Get());
88 delete stack;
89 tls_trace_memory_stack.Set(NULL);
90 // Intentionally do not release the thread-local-storage key here, that is,
91 // do not call tls_trace_memory_stack.Free(). Other threads have lazily
92 // created pointers in thread-local-storage via GetTraceMemoryStack() below.
93 // Those threads need to run the DeleteStack() destructor function when they
94 // exit. If we release the key the destructor will not be called and those
95 // threads will not clean up their memory.
98 // Returns the thread-local trace memory stack for the current thread, creating
99 // one if needed. Returns NULL if the thread-local storage key isn't
100 // initialized, which indicates that heap profiling isn't running.
101 TraceMemoryStack* GetTraceMemoryStack() {
102 TraceMemoryStack* stack =
103 static_cast<TraceMemoryStack*>(tls_trace_memory_stack.Get());
104 // Lazily initialize TraceMemoryStack objects for new threads.
105 if (!stack) {
106 stack = new TraceMemoryStack;
107 tls_trace_memory_stack.Set(stack);
109 return stack;
112 // Returns a "pseudo-stack" of pointers to trace event categories and names.
113 // Because tcmalloc stores one pointer per stack frame this converts N nested
114 // trace events into N * 2 pseudo-stack entries. Thus this macro invocation:
115 // TRACE_EVENT0("category1", "name1");
116 // TRACE_EVENT0("category2", "name2");
117 // becomes this pseudo-stack:
118 // stack_out[0] = "category1"
119 // stack_out[1] = "name1"
120 // stack_out[2] = "category2"
121 // stack_out[3] = "name2"
122 // Returns int instead of size_t to match the signature required by tcmalloc.
123 int GetPseudoStack(int skip_count_ignored, void** stack_out) {
124 // If the tracing system isn't fully initialized, just skip this allocation.
125 // Attempting to initialize will allocate memory, causing this function to
126 // be called recursively from inside the allocator.
127 if (!tls_trace_memory_stack.initialized() || !tls_trace_memory_stack.Get())
128 return 0;
129 TraceMemoryStack* stack =
130 static_cast<TraceMemoryStack*>(tls_trace_memory_stack.Get());
131 // Copy at most kMaxScopeDepth scope entries.
132 const size_t count = std::min(stack->scope_depth, kMaxScopeDepth);
133 // Notes that memcpy() works for zero bytes.
134 memcpy(stack_out,
135 stack->scope_data,
136 count * sizeof(stack->scope_data[0]));
137 // Each item in the trace event stack contains both name and category so tell
138 // tcmalloc that we have returned |count| * 2 stack frames.
139 return static_cast<int>(count * 2);
142 } // namespace
//////////////////////////////////////////////////////////////////////////////

// Hooks the tracing system up to the heap profiler. The three function
// pointers are expected to come from tcmalloc (see the ::HeapProfiler*
// comments below); |task_runner| is the thread on which profiling is
// started, stopped and dumped.
TraceMemoryController::TraceMemoryController(
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    HeapProfilerStartFunction heap_profiler_start_function,
    HeapProfilerStopFunction heap_profiler_stop_function,
    GetHeapProfileFunction get_heap_profile_function)
    : task_runner_(task_runner.Pass()),
      heap_profiler_start_function_(heap_profiler_start_function),
      heap_profiler_stop_function_(heap_profiler_stop_function),
      get_heap_profile_function_(get_heap_profile_function),
      weak_factory_(this) {
  // Force the "memory" category to show up in the trace viewer.
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("memory"), "init");
  // Watch for the tracing system being enabled.
  TraceLog::GetInstance()->AddEnabledStateObserver(this);
}
TraceMemoryController::~TraceMemoryController() {
  // If profiling is still active, shut it down before unregistering from
  // the trace log.
  if (dump_timer_.IsRunning())
    StopProfiling();
  TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}
168 // base::trace_event::TraceLog::EnabledStateChangedObserver overrides:
169 void TraceMemoryController::OnTraceLogEnabled() {
170 // Check to see if tracing is enabled for the memory category.
171 bool enabled;
172 TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("memory"),
173 &enabled);
174 if (!enabled)
175 return;
176 DVLOG(1) << "OnTraceLogEnabled";
177 task_runner_->PostTask(FROM_HERE,
178 base::Bind(&TraceMemoryController::StartProfiling,
179 weak_factory_.GetWeakPtr()));
void TraceMemoryController::OnTraceLogDisabled() {
  // The memory category is always disabled before OnTraceLogDisabled() is
  // called, so we cannot tell if it was enabled before. Always try to turn
  // off profiling; StopProfiling() is a no-op when it isn't running.
  DVLOG(1) << "OnTraceLogDisabled";
  task_runner_->PostTask(FROM_HERE,
                         base::Bind(&TraceMemoryController::StopProfiling,
                                    weak_factory_.GetWeakPtr()));
}
192 void TraceMemoryController::StartProfiling() {
193 // Watch for the tracing framework sending enabling more than once.
194 if (dump_timer_.IsRunning())
195 return;
196 DVLOG(1) << "Starting trace memory";
197 InitThreadLocalStorage();
198 ScopedTraceMemory::set_enabled(true);
199 // Call ::HeapProfilerWithPseudoStackStart().
200 heap_profiler_start_function_(&GetPseudoStack);
201 const int kDumpIntervalSeconds = 5;
202 dump_timer_.Start(FROM_HERE,
203 TimeDelta::FromSeconds(kDumpIntervalSeconds),
204 base::Bind(&TraceMemoryController::DumpMemoryProfile,
205 weak_factory_.GetWeakPtr()));
// Timer callback: captures the current heap profile and attaches it to the
// trace as an object snapshot.
void TraceMemoryController::DumpMemoryProfile() {
  // Don't trace allocations here in the memory tracing system.
  INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"),
                        TRACE_MEMORY_IGNORE);

  DVLOG(1) << "DumpMemoryProfile";
  // MemoryDumpHolder takes ownership of this string. See GetHeapProfile() in
  // tcmalloc for details.
  char* dump = get_heap_profile_function_();
  // Snapshot ID used for every heap dump.
  const int kSnapshotId = 1;
  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("memory"),
      "memory::Heap",
      kSnapshotId,
      scoped_refptr<ConvertableToTraceFormat>(new MemoryDumpHolder(dump)));
}
// Symmetric with StartProfiling(): stops the dump timer, disables scope
// tracking and shuts down the heap profiler. No-op when not profiling.
void TraceMemoryController::StopProfiling() {
  // Watch for the tracing framework sending disabled more than once.
  if (!dump_timer_.IsRunning())
    return;
  DVLOG(1) << "Stopping trace memory";
  dump_timer_.Stop();
  ScopedTraceMemory::set_enabled(false);
  CleanupThreadLocalStorage();
  // Call ::HeapProfilerStop().
  heap_profiler_stop_function_();
}
// Test-only: reports whether the periodic dump timer is active.
bool TraceMemoryController::IsTimerRunningForTest() const {
  return dump_timer_.IsRunning();
}
/////////////////////////////////////////////////////////////////////////////

// static
// Global on/off switch for scope recording; flipped by
// TraceMemoryController::{Start,Stop}Profiling() and the *ForTest() helpers.
bool ScopedTraceMemory::enabled_ = false;
246 void ScopedTraceMemory::Initialize(const char* category, const char* name) {
247 DCHECK(enabled_);
248 // Get our thread's copy of the stack.
249 TraceMemoryStack* trace_memory_stack = GetTraceMemoryStack();
250 const size_t index = trace_memory_stack->scope_depth;
251 // Don't record data for deeply nested scopes, but continue to increment
252 // |stack_depth| so we can match pushes and pops.
253 if (index < kMaxScopeDepth) {
254 ScopeData& event = trace_memory_stack->scope_data[index];
255 event.category = category;
256 event.name = name;
258 trace_memory_stack->scope_depth++;
261 void ScopedTraceMemory::Destroy() {
262 DCHECK(enabled_);
263 // Get our thread's copy of the stack.
264 TraceMemoryStack* trace_memory_stack = GetTraceMemoryStack();
265 // The tracing system can be turned on with ScopedTraceMemory objects
266 // allocated on the stack, so avoid potential underflow as they are destroyed.
267 if (trace_memory_stack->scope_depth > 0)
268 trace_memory_stack->scope_depth--;
// static
// Test-only: enables scope tracking without a TraceMemoryController.
void ScopedTraceMemory::InitForTest() {
  InitThreadLocalStorage();
  enabled_ = true;
}
// static
// Test-only: disables scope tracking and frees this thread's stack.
void ScopedTraceMemory::CleanupForTest() {
  enabled_ = false;
  CleanupThreadLocalStorage();
}
// static
// Test-only: returns the current scope nesting depth on the calling thread.
int ScopedTraceMemory::GetStackDepthForTest() {
  TraceMemoryStack* stack = GetTraceMemoryStack();
  return static_cast<int>(stack->scope_depth);
}
// static
// Test-only: returns the recorded scope at |stack_index|. Callers must pass
// an index below kMaxScopeDepth; no bounds check is performed here.
ScopedTraceMemory::ScopeData ScopedTraceMemory::GetScopeDataForTest(
    int stack_index) {
  TraceMemoryStack* stack = GetTraceMemoryStack();
  return stack->scope_data[stack_index];
}
296 /////////////////////////////////////////////////////////////////////////////
298 void AppendHeapProfileAsTraceFormat(const char* input, std::string* output) {
299 // Heap profile output has a header total line, then a list of stacks with
300 // memory totals, like this:
302 // heap profile: 357: 55227 [ 14653: 2624014] @ heapprofile
303 // 95: 40940 [ 649: 114260] @ 0x7fa7f4b3be13
304 // 77: 32546 [ 742: 106234] @
305 // 68: 4195 [ 1087: 98009] @ 0x7fa7fa9b9ba0 0x7fa7f4b3be13
307 // MAPPED_LIBRARIES:
308 // 1be411fc1000-1be4139e4000 rw-p 00000000 00:00 0
309 // 1be4139e4000-1be4139e5000 ---p 00000000 00:00 0
310 // ...
312 // Skip input after MAPPED_LIBRARIES.
313 std::string input_string;
314 const char* mapped_libraries = strstr(input, "MAPPED_LIBRARIES");
315 if (mapped_libraries) {
316 input_string.assign(input, mapped_libraries - input);
317 } else {
318 input_string.assign(input);
321 std::vector<std::string> lines;
322 size_t line_count = Tokenize(input_string, "\n", &lines);
323 if (line_count == 0) {
324 DLOG(WARNING) << "No lines found";
325 return;
328 // Handle the initial summary line.
329 output->append("[");
330 AppendHeapProfileTotalsAsTraceFormat(lines[0], output);
332 // Handle the following stack trace lines.
333 for (size_t i = 1; i < line_count; ++i) {
334 const std::string& line = lines[i];
335 AppendHeapProfileLineAsTraceFormat(line, output);
337 output->append("]\n");
340 void AppendHeapProfileTotalsAsTraceFormat(const std::string& line,
341 std::string* output) {
342 // This is what a line looks like:
343 // heap profile: 357: 55227 [ 14653: 2624014] @ heapprofile
345 // The numbers represent total allocations since profiling was enabled.
346 // From the example above:
347 // 357 = Outstanding allocations (mallocs - frees)
348 // 55227 = Outstanding bytes (malloc bytes - free bytes)
349 // 14653 = Total allocations (mallocs)
350 // 2624014 = Total bytes (malloc bytes)
351 std::vector<std::string> tokens;
352 Tokenize(line, " :[]@", &tokens);
353 if (tokens.size() < 4) {
354 DLOG(WARNING) << "Invalid totals line " << line;
355 return;
357 DCHECK_EQ(tokens[0], "heap");
358 DCHECK_EQ(tokens[1], "profile");
359 output->append("{\"current_allocs\": ");
360 output->append(tokens[2]);
361 output->append(", \"current_bytes\": ");
362 output->append(tokens[3]);
363 output->append(", \"trace\": \"\"}");
366 bool AppendHeapProfileLineAsTraceFormat(const std::string& line,
367 std::string* output) {
368 // This is what a line looks like:
369 // 68: 4195 [ 1087: 98009] @ 0x7fa7fa9b9ba0 0x7fa7f4b3be13
371 // The numbers represent allocations for a particular stack trace since
372 // profiling was enabled. From the example above:
373 // 68 = Outstanding allocations (mallocs - frees)
374 // 4195 = Outstanding bytes (malloc bytes - free bytes)
375 // 1087 = Total allocations (mallocs)
376 // 98009 = Total bytes (malloc bytes)
378 // 0x7fa7fa9b9ba0 0x7fa7f4b3be13 = Stack trace represented as pointers to
379 // static strings from trace event categories
380 // and names.
381 std::vector<std::string> tokens;
382 Tokenize(line, " :[]@", &tokens);
383 // It's valid to have no stack addresses, so only require 4 tokens.
384 if (tokens.size() < 4) {
385 DLOG(WARNING) << "Invalid line " << line;
386 return false;
388 // Don't bother with stacks that have no current allocations.
389 if (tokens[0] == "0")
390 return false;
391 output->append(",\n");
392 output->append("{\"current_allocs\": ");
393 output->append(tokens[0]);
394 output->append(", \"current_bytes\": ");
395 output->append(tokens[1]);
396 output->append(", \"trace\": \"");
398 // Convert pairs of "stack addresses" into category and name strings.
399 const std::string kSingleQuote = "'";
400 for (size_t t = 4; t < tokens.size(); t += 2) {
401 // Casting strings into pointers is ugly but otherwise tcmalloc would need
402 // to gain a special output serializer just for pseudo-stacks.
403 const char* trace_category = StringFromHexAddress(tokens[t]);
404 DCHECK_LT(t + 1, tokens.size());
405 const char* trace_name = StringFromHexAddress(tokens[t + 1]);
407 // TODO(jamescook): Report the trace category and name separately to the
408 // trace viewer and allow it to decide what decorations to apply. For now
409 // just hard-code a decoration for posted tasks (toplevel).
410 std::string trace_string(trace_name);
411 if (!strcmp(trace_category, "toplevel"))
412 trace_string.append("->PostTask");
414 // Some trace name strings have double quotes, convert them to single.
415 ReplaceChars(trace_string, "\"", kSingleQuote, &trace_string);
417 output->append(trace_string);
419 // Trace viewer expects a trailing space.
420 output->append(" ");
422 output->append("\"}");
423 return true;
426 const char* StringFromHexAddress(const std::string& hex_address) {
427 uint64 address = 0;
428 if (!base::HexStringToUInt64(hex_address, &address))
429 return "error";
430 if (!address)
431 return "null";
432 // Note that this cast handles 64-bit to 32-bit conversion if necessary.
433 return reinterpret_cast<const char*>(address);
436 } // namespace trace_event
437 } // namespace base