base/threading: remove ScopedTracker placed for experiments
[chromium-blink-merge.git] / base / trace_event / memory_dump_manager.cc
blobd076dda05054ce70e2f5e009c7103d68ea03c6db
1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/trace_event/memory_dump_manager.h"
7 #include <algorithm>
9 #include "base/atomic_sequence_num.h"
10 #include "base/command_line.h"
11 #include "base/compiler_specific.h"
12 #include "base/thread_task_runner_handle.h"
13 #include "base/trace_event/memory_dump_provider.h"
14 #include "base/trace_event/memory_dump_session_state.h"
15 #include "base/trace_event/process_memory_dump.h"
16 #include "base/trace_event/trace_event_argument.h"
17 #include "build/build_config.h"
19 #if !defined(OS_NACL)
20 #include "base/trace_event/process_memory_totals_dump_provider.h"
21 #endif
23 #if defined(OS_LINUX) || defined(OS_ANDROID)
24 #include "base/trace_event/malloc_dump_provider.h"
25 #include "base/trace_event/process_memory_maps_dump_provider.h"
26 #endif
28 #if defined(OS_ANDROID)
29 #include "base/trace_event/java_heap_dump_provider_android.h"
30 #endif
32 #if defined(OS_WIN)
33 #include "base/trace_event/winheap_dump_provider_win.h"
34 #endif
36 namespace base {
37 namespace trace_event {
39 namespace {
41 // TODO(primiano): this should be smarter and should do something similar to
42 // trace event synthetic delays.
43 const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra");
45 // Throttle mmaps at a rate of once every kHeavyMmapsDumpsRate standard dumps.
46 const int kHeavyMmapsDumpsRate = 8; // 250 ms * 8 = 2000 ms.
47 const int kDumpIntervalMs = 250;
48 const int kTraceEventNumArgs = 1;
49 const char* kTraceEventArgNames[] = {"dumps"};
50 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
52 StaticAtomicSequenceNumber g_next_guid;
53 uint32 g_periodic_dumps_count = 0;
54 MemoryDumpManager* g_instance_for_testing = nullptr;
55 MemoryDumpProvider* g_mmaps_dump_provider = nullptr;
57 void RequestPeriodicGlobalDump() {
58 MemoryDumpType dump_type = g_periodic_dumps_count == 0
59 ? MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS
60 : MemoryDumpType::PERIODIC_INTERVAL;
61 if (++g_periodic_dumps_count == kHeavyMmapsDumpsRate)
62 g_periodic_dumps_count = 0;
64 MemoryDumpManager::GetInstance()->RequestGlobalDump(dump_type);
67 } // namespace
// static
// Exposes the (disabled-by-default) trace category so tests can enable it.
const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory;

// static
// After this many consecutive OnMemoryDump() failures a provider is disabled
// for the rest of the tracing session (see ContinueAsyncProcessDump()).
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
// Sentinel for "no tracing process id assigned yet".
const uint64 MemoryDumpManager::kInvalidTracingProcessId = 0;
78 // static
79 MemoryDumpManager* MemoryDumpManager::GetInstance() {
80 if (g_instance_for_testing)
81 return g_instance_for_testing;
83 return Singleton<MemoryDumpManager,
84 LeakySingletonTraits<MemoryDumpManager>>::get();
87 // static
88 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
89 if (instance)
90 instance->skip_core_dumpers_auto_registration_for_testing_ = true;
91 g_instance_for_testing = instance;
// Construction only sets trivial defaults; the heavyweight setup (observer
// registration, core dump providers) happens in Initialize().
MemoryDumpManager::MemoryDumpManager()
    : did_unregister_dump_provider_(false),
      delegate_(nullptr),
      memory_tracing_enabled_(0),
      tracing_process_id_(kInvalidTracingProcessId),
      system_allocator_pool_name_(nullptr),
      skip_core_dumpers_auto_registration_for_testing_(false) {
  g_next_guid.GetNext();  // Make sure that first guid is not zero.
}
MemoryDumpManager::~MemoryDumpManager() {
  // Undo the AddEnabledStateObserver() performed in Initialize().
  base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}
// Starts observing TraceLog enabled/disabled transitions and registers the
// platform-specific core dump providers (unless a test instance opted out via
// SetInstanceForTesting()).
void MemoryDumpManager::Initialize() {
  TRACE_EVENT0(kTraceCategory, "init");  // Add to trace-viewer category list.
  trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);

  if (skip_core_dumpers_auto_registration_for_testing_)
    return;

  // Enable the core dump providers.
#if !defined(OS_NACL)
  RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
  // Cached in |g_mmaps_dump_provider| so that ContinueAsyncProcessDump() can
  // throttle mmaps dumps independently of the other providers.
  g_mmaps_dump_provider = ProcessMemoryMapsDumpProvider::GetInstance();
  RegisterDumpProvider(g_mmaps_dump_provider);
  RegisterDumpProvider(MallocDumpProvider::GetInstance());
  system_allocator_pool_name_ = MallocDumpProvider::kAllocatedObjects;
#endif

#if defined(OS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
#endif

#if defined(OS_WIN)
  RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
  system_allocator_pool_name_ = WinHeapDumpProvider::kAllocatedObjects;
#endif
}
137 void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) {
138 AutoLock lock(lock_);
139 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_);
140 delegate_ = delegate;
143 void MemoryDumpManager::RegisterDumpProvider(
144 MemoryDumpProvider* mdp,
145 const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
146 MemoryDumpProviderInfo mdp_info(mdp, task_runner);
147 AutoLock lock(lock_);
148 dump_providers_.insert(mdp_info);
151 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) {
152 RegisterDumpProvider(mdp, nullptr);
155 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
156 AutoLock lock(lock_);
158 auto mdp_iter = dump_providers_.begin();
159 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
160 if (mdp_iter->dump_provider == mdp)
161 break;
164 if (mdp_iter == dump_providers_.end())
165 return;
167 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
168 // only if the MDP has specified a thread affinity (via task_runner()) AND
169 // the unregistration happens on the same thread (so the MDP cannot unregister
170 // and OnMemoryDump() at the same time).
171 // Otherwise, it is not possible to guarantee that its unregistration is
172 // race-free. If you hit this DCHECK, your MDP has a bug.
173 DCHECK_IMPLIES(
174 subtle::NoBarrier_Load(&memory_tracing_enabled_),
175 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread())
176 << "The MemoryDumpProvider attempted to unregister itself in a racy way. "
177 << "Please file a crbug.";
179 dump_providers_.erase(mdp_iter);
180 did_unregister_dump_provider_ = true;
183 void MemoryDumpManager::RequestGlobalDump(
184 MemoryDumpType dump_type,
185 const MemoryDumpCallback& callback) {
186 // Bail out immediately if tracing is not enabled at all.
187 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
188 if (!callback.is_null())
189 callback.Run(0u /* guid */, false /* success */);
190 return;
193 const uint64 guid =
194 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());
196 // The delegate_ is supposed to be thread safe, immutable and long lived.
197 // No need to keep the lock after we ensure that a delegate has been set.
198 MemoryDumpManagerDelegate* delegate;
200 AutoLock lock(lock_);
201 delegate = delegate_;
204 if (delegate) {
205 // The delegate is in charge to coordinate the request among all the
206 // processes and call the CreateLocalDumpPoint on the local process.
207 MemoryDumpRequestArgs args = {guid, dump_type};
208 delegate->RequestGlobalMemoryDump(args, callback);
209 } else if (!callback.is_null()) {
210 callback.Run(guid, false /* success */);
214 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) {
215 RequestGlobalDump(dump_type, MemoryDumpCallback());
218 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
219 const MemoryDumpCallback& callback) {
220 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
222 AutoLock lock(lock_);
223 did_unregister_dump_provider_ = false;
224 pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
225 args, dump_providers_.begin(), session_state_, callback));
228 // Start the thread hop. |dump_providers_| are kept sorted by thread, so
229 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
230 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
231 ContinueAsyncProcessDump(pmd_async_state.Pass());
234 // At most one ContinueAsyncProcessDump() can be active at any time for a given
235 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to
236 // ensure consistency w.r.t. (un)registrations of |dump_providers_|.
237 // The linearization of dump providers' OnMemoryDump invocations is achieved by
238 // means of subsequent PostTask(s).
240 // 1) Prologue:
241 // - Check if the dump provider is disabled, if so skip the dump.
242 // - Check if we are on the right thread. If not hop and continue there.
243 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
244 // 3) Epilogue:
245 // - Unregister the dump provider if it failed too many times consecutively.
246 // - Advance the |next_dump_provider| iterator to the next dump provider.
247 // - If this was the last hop, create a trace event, add it to the trace
248 // and finalize (invoke callback).
void MemoryDumpManager::ContinueAsyncProcessDump(
    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // DO NOT put any LOG() statement in the locked sections, as in some contexts
  // (GPU process) LOG() ends up performing PostTask/IPCs.
  MemoryDumpProvider* mdp;
  bool skip_dump = false;
  {
    // --- Prologue (under |lock_|): decide whether to dump, skip, or hop. ---
    AutoLock lock(lock_);
    // In the unlikely event that a dump provider was unregistered while
    // dumping, abort the dump, as that would make |next_dump_provider| invalid.
    // Registration, on the other hand, is safe as per std::set<> contract.
    if (did_unregister_dump_provider_) {
      return AbortDumpLocked(pmd_async_state->callback,
                             pmd_async_state->task_runner,
                             pmd_async_state->req_args.dump_guid);
    }

    auto* mdp_info = &*pmd_async_state->next_dump_provider;
    mdp = mdp_info->dump_provider;
    if (mdp_info->disabled) {
      // Disabled earlier (too many failures or dead thread): skip silently.
      skip_dump = true;
    } else if (mdp == g_mmaps_dump_provider &&
               pmd_async_state->req_args.dump_type !=
                   MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS &&
               pmd_async_state->req_args.dump_type !=
                   MemoryDumpType::EXPLICITLY_TRIGGERED) {
      // Mmaps dumping is very heavyweight and cannot be performed at the same
      // rate of other dumps. TODO(primiano): this is a hack and should be
      // cleaned up as part of crbug.com/499731.
      skip_dump = true;
    } else if (mdp_info->task_runner &&
               !mdp_info->task_runner->BelongsToCurrentThread()) {
      // It's time to hop onto another thread.

      // Copy the callback + arguments just for the unlikely case in which
      // PostTask fails. In such case the Bind helper will destroy the
      // pmd_async_state and we must keep a copy of the fields to notify the
      // abort.
      MemoryDumpCallback callback = pmd_async_state->callback;
      scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
          pmd_async_state->task_runner;
      const uint64 dump_guid = pmd_async_state->req_args.dump_guid;

      const bool did_post_task = mdp_info->task_runner->PostTask(
          FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
                          Unretained(this), Passed(pmd_async_state.Pass())));
      if (did_post_task)
        return;

      // The thread is gone. At this point the best thing we can do is to
      // disable the dump provider and abort this dump.
      mdp_info->disabled = true;
      return AbortDumpLocked(callback, callback_task_runner, dump_guid);
    }
  }  // AutoLock(lock_)

  // Invoke the dump provider without holding the |lock_|.
  bool finalize = false;
  bool dump_successful = false;
  if (!skip_dump)
    dump_successful = mdp->OnMemoryDump(&pmd_async_state->process_memory_dump);

  {
    // --- Epilogue (under |lock_|): bookkeeping and iterator advance. ---
    AutoLock lock(lock_);
    // Re-check for racy unregistration, same rationale as in the prologue.
    if (did_unregister_dump_provider_) {
      return AbortDumpLocked(pmd_async_state->callback,
                             pmd_async_state->task_runner,
                             pmd_async_state->req_args.dump_guid);
    }
    auto* mdp_info = &*pmd_async_state->next_dump_provider;
    if (dump_successful) {
      mdp_info->consecutive_failures = 0;
    } else if (!skip_dump) {
      // Disable providers that keep failing (see kMaxConsecutiveFailuresCount).
      ++mdp_info->consecutive_failures;
      if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) {
        mdp_info->disabled = true;
      }
    }
    ++pmd_async_state->next_dump_provider;
    finalize = pmd_async_state->next_dump_provider == dump_providers_.end();
  }

  if (!skip_dump && !dump_successful) {
    LOG(ERROR) << "A memory dumper failed, possibly due to sandboxing "
                  "(crbug.com/461788). Disabling dumper for current process. "
                  "Try restarting chrome with the --no-sandbox switch.";
  }

  if (finalize)
    return FinalizeDumpAndAddToTrace(pmd_async_state.Pass());

  // More providers to go: recurse (possibly hopping threads in the prologue).
  ContinueAsyncProcessDump(pmd_async_state.Pass());
}
349 // static
350 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
351 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
352 if (!pmd_async_state->task_runner->BelongsToCurrentThread()) {
353 scoped_refptr<SingleThreadTaskRunner> task_runner =
354 pmd_async_state->task_runner;
355 task_runner->PostTask(FROM_HERE,
356 Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
357 Passed(pmd_async_state.Pass())));
358 return;
361 scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue());
362 pmd_async_state->process_memory_dump.AsValueInto(
363 static_cast<TracedValue*>(event_value.get()));
364 const char* const event_name =
365 MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);
367 TRACE_EVENT_API_ADD_TRACE_EVENT(
368 TRACE_EVENT_PHASE_MEMORY_DUMP,
369 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
370 pmd_async_state->req_args.dump_guid, kTraceEventNumArgs,
371 kTraceEventArgNames, kTraceEventArgTypes, nullptr /* arg_values */,
372 &event_value, TRACE_EVENT_FLAG_HAS_ID);
374 if (!pmd_async_state->callback.is_null()) {
375 pmd_async_state->callback.Run(pmd_async_state->req_args.dump_guid,
376 true /* success */);
377 pmd_async_state->callback.Reset();
381 // static
382 void MemoryDumpManager::AbortDumpLocked(
383 MemoryDumpCallback callback,
384 scoped_refptr<SingleThreadTaskRunner> task_runner,
385 uint64 dump_guid) {
386 if (callback.is_null())
387 return; // There is nothing to NACK.
389 // Post the callback even if we are already on the right thread to avoid
390 // invoking the callback while holding the lock_.
391 task_runner->PostTask(FROM_HERE,
392 Bind(callback, dump_guid, false /* success */));
// TraceLog::EnabledStateObserver: starts a memory tracing session, re-arming
// all dump providers and (in the coordinator process) the periodic dump timer.
void MemoryDumpManager::OnTraceLogEnabled() {
  // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter
  // to figure out (and cache) which dumpers should be enabled or not.
  // For the moment piggy back everything on the generic "memory" category.
  bool enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);

  // Initialize the TraceLog for the current thread. This is to avoid that the
  // TraceLog memory dump provider is registered lazily in the PostTask() below
  // while the |lock_| is taken;
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  AutoLock lock(lock_);

  // There is no point starting the tracing without a delegate.
  if (!enabled || !delegate_) {
    // Disable all the providers.
    for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it)
      it->disabled = true;
    return;
  }

  // Fresh per-session state; re-enable every provider and reset its failure
  // count for the new session.
  session_state_ = new MemoryDumpSessionState();
  for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) {
    it->disabled = false;
    it->consecutive_failures = 0;
  }

  // Published last, so RequestGlobalDump() only sees a fully set-up session.
  subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);

  // TODO(primiano): This is a temporary hack to disable periodic memory dumps
  // when running memory benchmarks until they can be enabled/disabled in
  // base::trace_event::TraceConfig. See https://goo.gl/5Hj3o0.
  // Only the coordinator process drives the periodic global dumps.
  if (delegate_->IsCoordinatorProcess() &&
      !CommandLine::ForCurrentProcess()->HasSwitch(
          "enable-memory-benchmarking")) {
    g_periodic_dumps_count = 0;
    periodic_dump_timer_.Start(FROM_HERE,
                               TimeDelta::FromMilliseconds(kDumpIntervalMs),
                               base::Bind(&RequestPeriodicGlobalDump));
  }
}
// TraceLog::EnabledStateObserver: tears down the tracing session. Stops the
// periodic timer, clears the enabled flag (so new RequestGlobalDump() calls
// bail out early) and drops the per-session state.
void MemoryDumpManager::OnTraceLogDisabled() {
  AutoLock lock(lock_);
  periodic_dump_timer_.Stop();
  subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
  session_state_ = nullptr;
}
uint64 MemoryDumpManager::GetTracingProcessId() const {
  // NOTE(review): there is no null check here, so this presumably assumes
  // SetDelegate() has already been called — confirm against callers.
  return delegate_->GetTracingProcessId();
}
// Per-provider bookkeeping entry stored in |dump_providers_|.
MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
    MemoryDumpProvider* dump_provider,
    const scoped_refptr<SingleThreadTaskRunner>& task_runner)
    : dump_provider(dump_provider),
      task_runner(task_runner),  // Optional thread affinity; may be null.
      consecutive_failures(0),
      disabled(false) {
}

MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {
}
461 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<(
462 const MemoryDumpProviderInfo& other) const {
463 if (task_runner == other.task_runner)
464 return dump_provider < other.dump_provider;
465 return task_runner < other.task_runner;
// State tracked across the asynchronous thread hops of one process dump.
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    MemoryDumpProviderInfoSet::iterator next_dump_provider,
    const scoped_refptr<MemoryDumpSessionState>& session_state,
    MemoryDumpCallback callback)
    : process_memory_dump(session_state),
      req_args(req_args),
      next_dump_provider(next_dump_provider),
      callback(callback),
      // Bound to the thread that created the dump; finalization and the
      // callback are bounced back to this task runner.
      task_runner(MessageLoop::current()->task_runner()) {
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
}
483 } // namespace trace_event
484 } // namespace base