// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <algorithm>
#include <limits>

#include "base/atomic_sequence_num.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if !defined(OS_NACL)
#include "base/trace_event/process_memory_totals_dump_provider.h"
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/process_memory_maps_dump_provider.h"
#endif

#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif

#if defined(OS_WIN)
#include "base/trace_event/winheap_dump_provider_win.h"
#endif

namespace base {
namespace trace_event {

namespace {

const int kTraceEventNumArgs = 1;
const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};

StaticAtomicSequenceNumber g_next_guid;
uint32 g_periodic_dumps_count = 0;
uint32 g_heavy_dumps_rate = 0;
MemoryDumpManager* g_instance_for_testing = nullptr;

void RequestPeriodicGlobalDump() {
  MemoryDumpArgs::LevelOfDetail dump_level_of_detail;
  if (g_heavy_dumps_rate == 0) {
    dump_level_of_detail = MemoryDumpArgs::LevelOfDetail::LOW;
  } else {
    dump_level_of_detail = g_periodic_dumps_count == 0
                               ? MemoryDumpArgs::LevelOfDetail::HIGH
                               : MemoryDumpArgs::LevelOfDetail::LOW;

    if (++g_periodic_dumps_count == g_heavy_dumps_rate)
      g_periodic_dumps_count = 0;
  }

  MemoryDumpArgs dump_args = {dump_level_of_detail};
  MemoryDumpManager::GetInstance()->RequestGlobalDump(
      MemoryDumpType::PERIODIC_INTERVAL, dump_args);
}
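
// A worked cadence example for the function above, assuming OnTraceLogEnabled()
// (below) computed g_heavy_dumps_rate == 8: successive timer ticks yield
// HIGH, LOW, LOW, LOW, LOW, LOW, LOW, LOW, HIGH, LOW, ... i.e. one
// high-detail dump every 8 ticks. If g_heavy_dumps_rate is 0 (no high-detail
// trigger configured), every dump is LOW.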

}  // namespace

// static
const char* const MemoryDumpManager::kTraceCategory =
    TRACE_DISABLED_BY_DEFAULT("memory-infra");

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64 MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_instance_for_testing)
    return g_instance_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

// static
void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
  if (instance)
    instance->skip_core_dumpers_auto_registration_for_testing_ = true;
  g_instance_for_testing = instance;
}

MemoryDumpManager::MemoryDumpManager()
    : delegate_(nullptr),
      memory_tracing_enabled_(0),
      tracing_process_id_(kInvalidTracingProcessId),
      system_allocator_pool_name_(nullptr),
      skip_core_dumpers_auto_registration_for_testing_(false),
      disable_periodic_dumps_for_testing_(false) {
  g_next_guid.GetNext();  // Make sure that first guid is not zero.
}

MemoryDumpManager::~MemoryDumpManager() {
  base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}

void MemoryDumpManager::Initialize() {
  TRACE_EVENT0(kTraceCategory, "init");  // Add to trace-viewer category list.
  trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);

  if (skip_core_dumpers_auto_registration_for_testing_)
    return;

  // Enable the core dump providers.
#if !defined(OS_NACL)
  RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
  RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance());
  RegisterDumpProvider(MallocDumpProvider::GetInstance());
  system_allocator_pool_name_ = MallocDumpProvider::kAllocatedObjects;
#endif

#if defined(OS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
#endif

#if defined(OS_WIN)
  RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
  system_allocator_pool_name_ = WinHeapDumpProvider::kAllocatedObjects;
#endif
}

void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) {
  AutoLock lock(lock_);
  DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_);
  delegate_ = delegate;
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
  MemoryDumpProviderInfo mdp_info(mdp, task_runner);
  AutoLock lock(lock_);
  auto iter_new = dump_providers_.insert(mdp_info);

  // If there was a previous entry, replace it with the new one. This handles
  // the case where a dump provider unregisters itself and then re-registers
  // before a memory dump happens, leaving its old entry in the collection
  // flagged |unregistered|.
  if (!iter_new.second) {
    dump_providers_.erase(iter_new.first);
    dump_providers_.insert(mdp_info);
  }
}

void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) {
  RegisterDumpProvider(mdp, nullptr);
}
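
// Usage sketch for a client of the two registration overloads above. The
// provider class and task runner names are hypothetical, for illustration:
//
//   class FooDumpProvider : public MemoryDumpProvider {
//    public:
//     bool OnMemoryDump(const MemoryDumpArgs& args,
//                       ProcessMemoryDump* pmd) override;
//   };
//
//   // Thread-affine registration: OnMemoryDump() is only invoked on
//   // |foo_task_runner|, which also makes unregistration race-free (see the
//   // DCHECK in UnregisterDumpProvider below).
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(foo_provider,
//                                                          foo_task_runner);
//
//   // Thread-agnostic registration (task_runner == nullptr): OnMemoryDump()
//   // may be invoked on whichever thread the dump is running on, so the
//   // provider must be thread-safe.
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(foo_provider);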

void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if (mdp_iter->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;

  // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
  // only if the MDP has specified a thread affinity (via task_runner()) AND
  // the unregistration happens on that same thread (so that unregistration
  // cannot race with an in-flight OnMemoryDump() call).
  // Otherwise, it is not possible to guarantee that its unregistration is
  // race-free. If you hit this DCHECK, your MDP has a bug.
  DCHECK_IMPLIES(
      subtle::NoBarrier_Load(&memory_tracing_enabled_),
      mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread())
      << "The MemoryDumpProvider attempted to unregister itself in a racy way. "
      << "Please file a crbug.";

  mdp_iter->unregistered = true;
}
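
// Note on the deferred removal above: the entry is only flagged here; it is
// physically erased from |dump_providers_| in the epilogue of
// ContinueAsyncProcessDump() (while |lock_| is held), so an in-flight dump
// never sees its iterator invalidated.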

void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type,
                                          const MemoryDumpArgs& dump_args,
                                          const MemoryDumpCallback& callback) {
  // Bail out immediately if tracing is not enabled at all.
  if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
    if (!callback.is_null())
      callback.Run(0u /* guid */, false /* success */);
    return;
  }

  const uint64 guid =
      TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());

  // The delegate_ is supposed to be thread safe, immutable and long lived.
  // No need to keep the lock after we ensure that a delegate has been set.
  MemoryDumpManagerDelegate* delegate;
  {
    AutoLock lock(lock_);
    delegate = delegate_;
  }

  if (delegate) {
    // The delegate is in charge of coordinating the request among all the
    // processes and calling CreateProcessDump() on the local process.
    MemoryDumpRequestArgs args = {guid, dump_type, dump_args};
    delegate->RequestGlobalMemoryDump(args, callback);
  } else if (!callback.is_null()) {
    callback.Run(guid, false /* success */);
  }
}
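
// Caller-side sketch (the callback name is hypothetical; the MemoryDumpType
// value mirrors the periodic path above, and any valid type works the same
// way). The callback receives the mangled guid and a success flag:
//
//   void OnDumpDone(uint64 dump_guid, bool success);
//
//   MemoryDumpArgs args = {MemoryDumpArgs::LevelOfDetail::HIGH};
//   MemoryDumpManager::GetInstance()->RequestGlobalDump(
//       MemoryDumpType::PERIODIC_INTERVAL, args, Bind(&OnDumpDone));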

void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type,
                                          const MemoryDumpArgs& dump_args) {
  RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback());
}

void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          const MemoryDumpCallback& callback) {
  scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);
    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
        args, dump_providers_.begin(), session_state_, callback));
  }

  // Start the thread hop. |dump_providers_| is kept sorted by thread, so
  // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. the
  // thread affinity specified by the MemoryDumpProvider(s) in
  // RegisterDumpProvider()).
  ContinueAsyncProcessDump(pmd_async_state.Pass());
}

// At most one ContinueAsyncProcessDump() can be active at any time for a given
// PMD, regardless of status of the |lock_|. |lock_| is used here purely to
// ensure consistency w.r.t. (un)registrations of |dump_providers_|.
// The linearization of dump providers' OnMemoryDump invocations is achieved by
// means of subsequent PostTask(s).
//
// 1) Prologue:
//   - Check if the dump provider is disabled; if so, skip the dump.
//   - Check if we are on the right thread. If not, hop and continue there.
// 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
// 3) Epilogue:
//   - Unregister the dump provider if it failed too many times consecutively.
//   - Advance the |next_dump_provider| iterator to the next dump provider.
//   - If this was the last hop, create a trace event, add it to the trace
//     and finalize (invoke callback).
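//
// Illustrative hop sequence (provider/thread names are hypothetical): with
// providers {A1, A2} bound to thread A and {B1} bound to thread B, a dump
// started on thread A runs A1 and A2 in place, PostTask()s itself once to
// thread B to run B1, and then finalizes. Because |dump_providers_| is sorted
// by task runner (see MemoryDumpProviderInfo::operator< below), each thread
// is visited at most once per dump.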

void MemoryDumpManager::ContinueAsyncProcessDump(
    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // DO NOT put any LOG() statement in the locked sections, as in some contexts
  // (GPU process) LOG() ends up performing PostTask/IPCs.
  MemoryDumpProvider* mdp;
  bool skip_dump = false;
  {
    AutoLock lock(lock_);

    auto mdp_info = pmd_async_state->next_dump_provider;
    mdp = mdp_info->dump_provider;
    if (mdp_info->disabled || mdp_info->unregistered) {
      skip_dump = true;
    } else if (mdp_info->task_runner &&
               !mdp_info->task_runner->BelongsToCurrentThread()) {
      // It's time to hop onto another thread.

      // Copy the callback + arguments just for the unlikely case in which
      // PostTask fails. In such a case the Bind helper will destroy the
      // pmd_async_state and we must keep a copy of the fields to notify the
      // abort.
      MemoryDumpCallback callback = pmd_async_state->callback;
      scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
          pmd_async_state->task_runner;
      const uint64 dump_guid = pmd_async_state->req_args.dump_guid;

      const bool did_post_task = mdp_info->task_runner->PostTask(
          FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
                          Unretained(this), Passed(pmd_async_state.Pass())));
      if (did_post_task)
        return;

      // The thread is gone. At this point the best thing we can do is to
      // disable the dump provider and abort this dump.
      mdp_info->disabled = true;
      return AbortDumpLocked(callback, callback_task_runner, dump_guid);
    }
  }  // AutoLock(lock_)

  // Invoke the dump provider without holding the |lock_|.
  bool finalize = false;
  bool dump_successful = false;

  if (!skip_dump) {
    dump_successful = mdp->OnMemoryDump(pmd_async_state->req_args.dump_args,
                                        &pmd_async_state->process_memory_dump);
  }

  {
    AutoLock lock(lock_);
    auto mdp_info = pmd_async_state->next_dump_provider;
    if (dump_successful) {
      mdp_info->consecutive_failures = 0;
    } else if (!skip_dump) {
      ++mdp_info->consecutive_failures;
      if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) {
        mdp_info->disabled = true;
      }
    }
    ++pmd_async_state->next_dump_provider;
    finalize = pmd_async_state->next_dump_provider == dump_providers_.end();

    if (mdp_info->unregistered)
      dump_providers_.erase(mdp_info);
  }

  if (!skip_dump && !dump_successful) {
    LOG(ERROR) << "A memory dumper failed, possibly due to sandboxing "
                  "(crbug.com/461788). Disabling dumper for current process. "
                  "Try restarting chrome with the --no-sandbox switch.";
  }

  if (finalize)
    return FinalizeDumpAndAddToTrace(pmd_async_state.Pass());

  ContinueAsyncProcessDump(pmd_async_state.Pass());
}

// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  if (!pmd_async_state->task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> task_runner =
        pmd_async_state->task_runner;
    task_runner->PostTask(FROM_HERE,
                          Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
                               Passed(pmd_async_state.Pass())));
    return;
  }

  scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue());
  pmd_async_state->process_memory_dump.AsValueInto(
      static_cast<TracedValue*>(event_value.get()));
  const char* const event_name =
      MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);

  TRACE_EVENT_API_ADD_TRACE_EVENT(
      TRACE_EVENT_PHASE_MEMORY_DUMP,
      TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
      pmd_async_state->req_args.dump_guid, kTraceEventNumArgs,
      kTraceEventArgNames, kTraceEventArgTypes, nullptr /* arg_values */,
      &event_value, TRACE_EVENT_FLAG_HAS_ID);

  if (!pmd_async_state->callback.is_null()) {
    pmd_async_state->callback.Run(pmd_async_state->req_args.dump_guid,
                                  true /* success */);
    pmd_async_state->callback.Reset();
  }
}

// static
void MemoryDumpManager::AbortDumpLocked(
    MemoryDumpCallback callback,
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    uint64 dump_guid) {
  if (callback.is_null())
    return;  // There is nothing to NACK.

  // Post the callback even if we are already on the right thread to avoid
  // invoking the callback while holding the |lock_|.
  task_runner->PostTask(FROM_HERE,
                        Bind(callback, dump_guid, false /* success */));
}

void MemoryDumpManager::OnTraceLogEnabled() {
  // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter
  // to figure out (and cache) which dumpers should be enabled or not.
  // For the moment piggy back everything on the generic "memory" category.
  bool enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);

  // Initialize the TraceLog for the current thread. This prevents the TraceLog
  // memory dump provider from being registered lazily in the PostTask() below
  // while the |lock_| is taken.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  AutoLock lock(lock_);

  // There is no point starting the tracing without a delegate.
  if (!enabled || !delegate_) {
    // Disable all the providers.
    for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it)
      it->disabled = true;
    return;
  }

  session_state_ = new MemoryDumpSessionState();
  for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) {
    it->disabled = false;
    it->consecutive_failures = 0;
  }

  subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);

  // TODO(primiano): This is a temporary hack to disable periodic memory dumps
  // when running memory benchmarks until telemetry uses TraceConfig to
  // enable/disable periodic dumps. The same mechanism should be used to
  // disable periodic dumps in tests.
  if (!delegate_->IsCoordinatorProcess() ||
      CommandLine::ForCurrentProcess()->HasSwitch(
          "enable-memory-benchmarking") ||
      disable_periodic_dumps_for_testing_) {
    return;
  }

  // Enable periodic dumps. At the moment the periodic support is limited to at
  // most one low-detail periodic dump and at most one high-detail periodic
  // dump. If both are specified the high-detail period must be an integer
  // multiple of the low-detail one.
  g_periodic_dumps_count = 0;
  const TraceConfig trace_config =
      TraceLog::GetInstance()->GetCurrentTraceConfig();
  const TraceConfig::MemoryDumpConfig& config_list =
      trace_config.memory_dump_config();
  if (config_list.empty())
    return;

  uint32 min_timer_period_ms = std::numeric_limits<uint32>::max();
  uint32 heavy_dump_period_ms = 0;
  DCHECK_LE(config_list.size(), 2u);
  for (const TraceConfig::MemoryDumpTriggerConfig& config : config_list) {
    DCHECK(config.periodic_interval_ms);
    if (config.level_of_detail == MemoryDumpArgs::LevelOfDetail::HIGH)
      heavy_dump_period_ms = config.periodic_interval_ms;
    min_timer_period_ms =
        std::min(min_timer_period_ms, config.periodic_interval_ms);
  }

  DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
  g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;
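
  // Worked example (the config values are hypothetical): with a low-detail
  // trigger every 250 ms and a high-detail trigger every 2000 ms, the timer
  // below fires every min_timer_period_ms = 250 ms and g_heavy_dumps_rate =
  // 2000 / 250 = 8, i.e. every 8th tick produces a high-detail dump (see
  // RequestPeriodicGlobalDump above). With only a low-detail trigger,
  // heavy_dump_period_ms stays 0 and so does g_heavy_dumps_rate.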
  periodic_dump_timer_.Start(FROM_HERE,
                             TimeDelta::FromMilliseconds(min_timer_period_ms),
                             base::Bind(&RequestPeriodicGlobalDump));
}

void MemoryDumpManager::OnTraceLogDisabled() {
  AutoLock lock(lock_);
  periodic_dump_timer_.Stop();
  subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
  session_state_ = nullptr;
}

uint64 MemoryDumpManager::GetTracingProcessId() const {
  return delegate_->GetTracingProcessId();
}

MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
    MemoryDumpProvider* dump_provider,
    const scoped_refptr<SingleThreadTaskRunner>& task_runner)
    : dump_provider(dump_provider),
      task_runner(task_runner),
      consecutive_failures(0),
      disabled(false),
      unregistered(false) {}

MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {
}

bool MemoryDumpManager::MemoryDumpProviderInfo::operator<(
    const MemoryDumpProviderInfo& other) const {
  if (task_runner == other.task_runner)
    return dump_provider < other.dump_provider;
  return task_runner < other.task_runner;
}
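
// The ordering above is what keeps |dump_providers_| grouped by task runner
// (thread-agnostic providers, registered with a null task runner, sort
// together as well), which in turn is what lets ContinueAsyncProcessDump()
// hop to each thread at most once per dump.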

MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    MemoryDumpProviderInfoSet::iterator next_dump_provider,
    const scoped_refptr<MemoryDumpSessionState>& session_state,
    MemoryDumpCallback callback)
    : process_memory_dump(session_state),
      req_args(req_args),
      next_dump_provider(next_dump_provider),
      callback(callback),
      task_runner(MessageLoop::current()->task_runner()) {
}
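
// Note: |task_runner| above captures the thread on which the dump request
// started. FinalizeDumpAndAddToTrace() and AbortDumpLocked() post back to it,
// so the trace-event emission and the completion callback always run on that
// original thread.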

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
}

}  // namespace trace_event
}  // namespace base