Revert "Fix broken channel icon in chrome://help on CrOS" and try again
[chromium-blink-merge.git] / base / trace_event / memory_dump_manager.cc
blobcd1503b51952634ed8b19475c8176da4c151a3fc
1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/trace_event/memory_dump_manager.h"
7 #include <algorithm>
9 #include "base/atomic_sequence_num.h"
10 #include "base/command_line.h"
11 #include "base/compiler_specific.h"
12 #include "base/thread_task_runner_handle.h"
13 #include "base/trace_event/memory_dump_provider.h"
14 #include "base/trace_event/memory_dump_session_state.h"
15 #include "base/trace_event/process_memory_dump.h"
16 #include "base/trace_event/trace_event_argument.h"
17 #include "build/build_config.h"
19 #if !defined(OS_NACL)
20 #include "base/trace_event/process_memory_totals_dump_provider.h"
21 #endif
23 #if defined(OS_LINUX) || defined(OS_ANDROID)
24 #include "base/trace_event/malloc_dump_provider.h"
25 #include "base/trace_event/process_memory_maps_dump_provider.h"
26 #endif
28 #if defined(OS_ANDROID)
29 #include "base/trace_event/java_heap_dump_provider_android.h"
30 #endif
32 #if defined(OS_WIN)
33 #include "base/trace_event/winheap_dump_provider_win.h"
34 #endif
36 namespace base {
37 namespace trace_event {
39 namespace {
41 // Throttle mmaps at a rate of once every kHeavyMmapsDumpsRate standard dumps.
42 const int kHeavyDumpsRate = 8; // 250 ms * 8 = 2000 ms.
43 const int kDumpIntervalMs = 250;
44 const int kTraceEventNumArgs = 1;
45 const char* kTraceEventArgNames[] = {"dumps"};
46 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
48 StaticAtomicSequenceNumber g_next_guid;
49 uint32 g_periodic_dumps_count = 0;
50 MemoryDumpManager* g_instance_for_testing = nullptr;
52 void RequestPeriodicGlobalDump() {
53 MemoryDumpArgs::LevelOfDetail dump_level_of_detail =
54 g_periodic_dumps_count == 0 ? MemoryDumpArgs::LevelOfDetail::HIGH
55 : MemoryDumpArgs::LevelOfDetail::LOW;
56 if (++g_periodic_dumps_count == kHeavyDumpsRate)
57 g_periodic_dumps_count = 0;
59 MemoryDumpArgs dump_args = {dump_level_of_detail};
60 MemoryDumpManager::GetInstance()->RequestGlobalDump(
61 MemoryDumpType::PERIODIC_INTERVAL, dump_args);
64 } // namespace
// static
// Tracing category for all memory-infra dump events (disabled by default).
const char* const MemoryDumpManager::kTraceCategory =
    TRACE_DISABLED_BY_DEFAULT("memory-infra");

// static
// A dump provider is disabled after this many consecutive OnMemoryDump()
// failures (see ContinueAsyncProcessDump()).
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
// Sentinel value: 0 is never handed out as a tracing process id.
const uint64 MemoryDumpManager::kInvalidTracingProcessId = 0;
76 // static
77 MemoryDumpManager* MemoryDumpManager::GetInstance() {
78 if (g_instance_for_testing)
79 return g_instance_for_testing;
81 return Singleton<MemoryDumpManager,
82 LeakySingletonTraits<MemoryDumpManager>>::get();
85 // static
86 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
87 if (instance)
88 instance->skip_core_dumpers_auto_registration_for_testing_ = true;
89 g_instance_for_testing = instance;
92 MemoryDumpManager::MemoryDumpManager()
93 : delegate_(nullptr),
94 memory_tracing_enabled_(0),
95 tracing_process_id_(kInvalidTracingProcessId),
96 system_allocator_pool_name_(nullptr),
97 skip_core_dumpers_auto_registration_for_testing_(false),
98 disable_periodic_dumps_for_testing_(false) {
99 g_next_guid.GetNext(); // Make sure that first guid is not zero.
102 MemoryDumpManager::~MemoryDumpManager() {
103 base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
106 void MemoryDumpManager::Initialize() {
107 TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
108 trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);
110 if (skip_core_dumpers_auto_registration_for_testing_)
111 return;
113 // Enable the core dump providers.
114 #if !defined(OS_NACL)
115 RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
116 #endif
118 #if defined(OS_LINUX) || defined(OS_ANDROID)
119 // The memory maps dump provider is currently disabled for security reasons
120 // and will be enabled once tracing is more secure (crbug.com/517906).
121 // It is still enabled for running benchmarks.
122 if (CommandLine::ForCurrentProcess()->HasSwitch(
123 "enable-memory-benchmarking")) {
124 RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance());
127 RegisterDumpProvider(MallocDumpProvider::GetInstance());
128 system_allocator_pool_name_ = MallocDumpProvider::kAllocatedObjects;
129 #endif
131 #if defined(OS_ANDROID)
132 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
133 #endif
135 #if defined(OS_WIN)
136 RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
137 system_allocator_pool_name_ = WinHeapDumpProvider::kAllocatedObjects;
138 #endif
141 void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) {
142 AutoLock lock(lock_);
143 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_);
144 delegate_ = delegate;
147 void MemoryDumpManager::RegisterDumpProvider(
148 MemoryDumpProvider* mdp,
149 const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
150 MemoryDumpProviderInfo mdp_info(mdp, task_runner);
151 AutoLock lock(lock_);
152 auto iter_new = dump_providers_.insert(mdp_info);
154 // If there was a previous entry, replace it with the new one. This is to deal
155 // with the case where a dump provider unregisters itself and then re-
156 // registers before a memory dump happens, so its entry was still in the
157 // collection but flagged |unregistered|.
158 if (!iter_new.second) {
159 dump_providers_.erase(iter_new.first);
160 dump_providers_.insert(mdp_info);
164 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) {
165 RegisterDumpProvider(mdp, nullptr);
168 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
169 AutoLock lock(lock_);
171 auto mdp_iter = dump_providers_.begin();
172 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
173 if (mdp_iter->dump_provider == mdp)
174 break;
177 if (mdp_iter == dump_providers_.end())
178 return;
180 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
181 // only if the MDP has specified a thread affinity (via task_runner()) AND
182 // the unregistration happens on the same thread (so the MDP cannot unregister
183 // and OnMemoryDump() at the same time).
184 // Otherwise, it is not possible to guarantee that its unregistration is
185 // race-free. If you hit this DCHECK, your MDP has a bug.
186 DCHECK_IMPLIES(
187 subtle::NoBarrier_Load(&memory_tracing_enabled_),
188 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread())
189 << "The MemoryDumpProvider attempted to unregister itself in a racy way. "
190 << "Please file a crbug.";
192 mdp_iter->unregistered = true;
195 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type,
196 const MemoryDumpArgs& dump_args,
197 const MemoryDumpCallback& callback) {
198 // Bail out immediately if tracing is not enabled at all.
199 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
200 if (!callback.is_null())
201 callback.Run(0u /* guid */, false /* success */);
202 return;
205 const uint64 guid =
206 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());
208 // The delegate_ is supposed to be thread safe, immutable and long lived.
209 // No need to keep the lock after we ensure that a delegate has been set.
210 MemoryDumpManagerDelegate* delegate;
212 AutoLock lock(lock_);
213 delegate = delegate_;
216 if (delegate) {
217 // The delegate is in charge to coordinate the request among all the
218 // processes and call the CreateLocalDumpPoint on the local process.
219 MemoryDumpRequestArgs args = {guid, dump_type, dump_args};
220 delegate->RequestGlobalMemoryDump(args, callback);
221 } else if (!callback.is_null()) {
222 callback.Run(guid, false /* success */);
226 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type,
227 const MemoryDumpArgs& dump_args) {
228 RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback());
231 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
232 const MemoryDumpCallback& callback) {
233 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
235 AutoLock lock(lock_);
236 pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
237 args, dump_providers_.begin(), session_state_, callback));
240 // Start the thread hop. |dump_providers_| are kept sorted by thread, so
241 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
242 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
243 ContinueAsyncProcessDump(pmd_async_state.Pass());
246 // At most one ContinueAsyncProcessDump() can be active at any time for a given
247 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to
248 // ensure consistency w.r.t. (un)registrations of |dump_providers_|.
249 // The linearization of dump providers' OnMemoryDump invocations is achieved by
250 // means of subsequent PostTask(s).
252 // 1) Prologue:
253 // - Check if the dump provider is disabled, if so skip the dump.
254 // - Check if we are on the right thread. If not hop and continue there.
255 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
256 // 3) Epilogue:
257 // - Unregister the dump provider if it failed too many times consecutively.
258 // - Advance the |next_dump_provider| iterator to the next dump provider.
259 // - If this was the last hop, create a trace event, add it to the trace
260 // and finalize (invoke callback).
262 void MemoryDumpManager::ContinueAsyncProcessDump(
263 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
264 // Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
265 // in the PostTask below don't end up registering their own dump providers
266 // (for discounting trace memory overhead) while holding the |lock_|.
267 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
269 // DO NOT put any LOG() statement in the locked sections, as in some contexts
270 // (GPU process) LOG() ends up performing PostTask/IPCs.
271 MemoryDumpProvider* mdp;
272 bool skip_dump = false;
274 AutoLock lock(lock_);
276 auto mdp_info = pmd_async_state->next_dump_provider;
277 mdp = mdp_info->dump_provider;
278 if (mdp_info->disabled || mdp_info->unregistered) {
279 skip_dump = true;
280 } else if (mdp_info->task_runner &&
281 !mdp_info->task_runner->BelongsToCurrentThread()) {
282 // It's time to hop onto another thread.
284 // Copy the callback + arguments just for the unlikley case in which
285 // PostTask fails. In such case the Bind helper will destroy the
286 // pmd_async_state and we must keep a copy of the fields to notify the
287 // abort.
288 MemoryDumpCallback callback = pmd_async_state->callback;
289 scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
290 pmd_async_state->task_runner;
291 const uint64 dump_guid = pmd_async_state->req_args.dump_guid;
293 const bool did_post_task = mdp_info->task_runner->PostTask(
294 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
295 Unretained(this), Passed(pmd_async_state.Pass())));
296 if (did_post_task)
297 return;
299 // The thread is gone. At this point the best thing we can do is to
300 // disable the dump provider and abort this dump.
301 mdp_info->disabled = true;
302 return AbortDumpLocked(callback, callback_task_runner, dump_guid);
304 } // AutoLock(lock_)
306 // Invoke the dump provider without holding the |lock_|.
307 bool finalize = false;
308 bool dump_successful = false;
310 if (!skip_dump) {
311 dump_successful = mdp->OnMemoryDump(pmd_async_state->req_args.dump_args,
312 &pmd_async_state->process_memory_dump);
316 AutoLock lock(lock_);
317 auto mdp_info = pmd_async_state->next_dump_provider;
318 if (dump_successful) {
319 mdp_info->consecutive_failures = 0;
320 } else if (!skip_dump) {
321 ++mdp_info->consecutive_failures;
322 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) {
323 mdp_info->disabled = true;
326 ++pmd_async_state->next_dump_provider;
327 finalize = pmd_async_state->next_dump_provider == dump_providers_.end();
329 if (mdp_info->unregistered)
330 dump_providers_.erase(mdp_info);
333 if (!skip_dump && !dump_successful) {
334 LOG(ERROR) << "A memory dumper failed, possibly due to sandboxing "
335 "(crbug.com/461788). Disabling dumper for current process. "
336 "Try restarting chrome with the --no-sandbox switch.";
339 if (finalize)
340 return FinalizeDumpAndAddToTrace(pmd_async_state.Pass());
342 ContinueAsyncProcessDump(pmd_async_state.Pass());
345 // static
346 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
347 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
348 if (!pmd_async_state->task_runner->BelongsToCurrentThread()) {
349 scoped_refptr<SingleThreadTaskRunner> task_runner =
350 pmd_async_state->task_runner;
351 task_runner->PostTask(FROM_HERE,
352 Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
353 Passed(pmd_async_state.Pass())));
354 return;
357 scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue());
358 pmd_async_state->process_memory_dump.AsValueInto(
359 static_cast<TracedValue*>(event_value.get()));
360 const char* const event_name =
361 MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);
363 TRACE_EVENT_API_ADD_TRACE_EVENT(
364 TRACE_EVENT_PHASE_MEMORY_DUMP,
365 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
366 pmd_async_state->req_args.dump_guid, kTraceEventNumArgs,
367 kTraceEventArgNames, kTraceEventArgTypes, nullptr /* arg_values */,
368 &event_value, TRACE_EVENT_FLAG_HAS_ID);
370 if (!pmd_async_state->callback.is_null()) {
371 pmd_async_state->callback.Run(pmd_async_state->req_args.dump_guid,
372 true /* success */);
373 pmd_async_state->callback.Reset();
377 // static
378 void MemoryDumpManager::AbortDumpLocked(
379 MemoryDumpCallback callback,
380 scoped_refptr<SingleThreadTaskRunner> task_runner,
381 uint64 dump_guid) {
382 if (callback.is_null())
383 return; // There is nothing to NACK.
385 // Post the callback even if we are already on the right thread to avoid
386 // invoking the callback while holding the lock_.
387 task_runner->PostTask(FROM_HERE,
388 Bind(callback, dump_guid, false /* success */));
391 void MemoryDumpManager::OnTraceLogEnabled() {
392 // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter
393 // to figure out (and cache) which dumpers should be enabled or not.
394 // For the moment piggy back everything on the generic "memory" category.
395 bool enabled;
396 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
398 // Initialize the TraceLog for the current thread. This is to avoid that the
399 // TraceLog memory dump provider is registered lazily in the PostTask() below
400 // while the |lock_| is taken;
401 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
403 AutoLock lock(lock_);
405 // There is no point starting the tracing without a delegate.
406 if (!enabled || !delegate_) {
407 // Disable all the providers.
408 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it)
409 it->disabled = true;
410 return;
413 session_state_ = new MemoryDumpSessionState();
414 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) {
415 it->disabled = false;
416 it->consecutive_failures = 0;
419 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
421 // TODO(primiano): This is a temporary hack to disable periodic memory dumps
422 // when running memory benchmarks until they can be enabled/disabled in
423 // base::trace_event::TraceConfig. See https://goo.gl/5Hj3o0.
424 // The same mechanism should be used to disable periodic dumps in tests.
425 if (delegate_->IsCoordinatorProcess() &&
426 !CommandLine::ForCurrentProcess()->HasSwitch(
427 "enable-memory-benchmarking") &&
428 !disable_periodic_dumps_for_testing_) {
429 g_periodic_dumps_count = 0;
430 periodic_dump_timer_.Start(FROM_HERE,
431 TimeDelta::FromMilliseconds(kDumpIntervalMs),
432 base::Bind(&RequestPeriodicGlobalDump));
436 void MemoryDumpManager::OnTraceLogDisabled() {
437 AutoLock lock(lock_);
438 periodic_dump_timer_.Stop();
439 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
440 session_state_ = nullptr;
443 uint64 MemoryDumpManager::GetTracingProcessId() const {
444 return delegate_->GetTracingProcessId();
447 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
448 MemoryDumpProvider* dump_provider,
449 const scoped_refptr<SingleThreadTaskRunner>& task_runner)
450 : dump_provider(dump_provider),
451 task_runner(task_runner),
452 consecutive_failures(0),
453 disabled(false),
454 unregistered(false) {}
456 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {
459 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<(
460 const MemoryDumpProviderInfo& other) const {
461 if (task_runner == other.task_runner)
462 return dump_provider < other.dump_provider;
463 return task_runner < other.task_runner;
466 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
467 MemoryDumpRequestArgs req_args,
468 MemoryDumpProviderInfoSet::iterator next_dump_provider,
469 const scoped_refptr<MemoryDumpSessionState>& session_state,
470 MemoryDumpCallback callback)
471 : process_memory_dump(session_state),
472 req_args(req_args),
473 next_dump_provider(next_dump_provider),
474 callback(callback),
475 task_runner(MessageLoop::current()->task_runner()) {
478 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
481 } // namespace trace_event
482 } // namespace base