1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "base/trace_event/memory_dump_manager.h"

#include "base/atomic_sequence_num.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if !defined(OS_NACL)
#include "base/trace_event/process_memory_totals_dump_provider.h"
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/process_memory_maps_dump_provider.h"
#endif

#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif

#if defined(OS_WIN)
#include "base/trace_event/winheap_dump_provider_win.h"
#endif
37 namespace trace_event
{
41 // TODO(primiano): this should be smarter and should do something similar to
42 // trace event synthetic delays.
43 const char kTraceCategory
[] = TRACE_DISABLED_BY_DEFAULT("memory-infra");
45 // Throttle mmaps at a rate of once every kHeavyMmapsDumpsRate standard dumps.
46 const int kHeavyMmapsDumpsRate
= 8; // 250 ms * 8 = 2000 ms.
47 const int kDumpIntervalMs
= 250;
48 const int kTraceEventNumArgs
= 1;
49 const char* kTraceEventArgNames
[] = {"dumps"};
50 const unsigned char kTraceEventArgTypes
[] = {TRACE_VALUE_TYPE_CONVERTABLE
};
52 StaticAtomicSequenceNumber g_next_guid
;
53 uint32 g_periodic_dumps_count
= 0;
54 MemoryDumpManager
* g_instance_for_testing
= nullptr;
55 MemoryDumpProvider
* g_mmaps_dump_provider
= nullptr;
57 void RequestPeriodicGlobalDump() {
58 MemoryDumpType dump_type
= g_periodic_dumps_count
== 0
59 ? MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS
60 : MemoryDumpType::PERIODIC_INTERVAL
;
61 if (++g_periodic_dumps_count
== kHeavyMmapsDumpsRate
)
62 g_periodic_dumps_count
= 0;
64 MemoryDumpManager::GetInstance()->RequestGlobalDump(dump_type
);
70 const char* const MemoryDumpManager::kTraceCategoryForTesting
= kTraceCategory
;
73 const int MemoryDumpManager::kMaxConsecutiveFailuresCount
= 3;
76 const uint64
MemoryDumpManager::kInvalidTracingProcessId
= 0;
79 MemoryDumpManager
* MemoryDumpManager::GetInstance() {
80 if (g_instance_for_testing
)
81 return g_instance_for_testing
;
83 return Singleton
<MemoryDumpManager
,
84 LeakySingletonTraits
<MemoryDumpManager
>>::get();
88 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager
* instance
) {
90 instance
->skip_core_dumpers_auto_registration_for_testing_
= true;
91 g_instance_for_testing
= instance
;
94 MemoryDumpManager::MemoryDumpManager()
95 : did_unregister_dump_provider_(false),
97 memory_tracing_enabled_(0),
98 tracing_process_id_(kInvalidTracingProcessId
),
99 system_allocator_pool_name_(nullptr),
100 skip_core_dumpers_auto_registration_for_testing_(false) {
101 g_next_guid
.GetNext(); // Make sure that first guid is not zero.
104 MemoryDumpManager::~MemoryDumpManager() {
105 base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
108 void MemoryDumpManager::Initialize() {
109 TRACE_EVENT0(kTraceCategory
, "init"); // Add to trace-viewer category list.
110 trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);
112 if (skip_core_dumpers_auto_registration_for_testing_
)
115 // Enable the core dump providers.
116 #if !defined(OS_NACL)
117 RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
120 #if defined(OS_LINUX) || defined(OS_ANDROID)
121 g_mmaps_dump_provider
= ProcessMemoryMapsDumpProvider::GetInstance();
122 RegisterDumpProvider(g_mmaps_dump_provider
);
123 RegisterDumpProvider(MallocDumpProvider::GetInstance());
124 system_allocator_pool_name_
= MallocDumpProvider::kAllocatedObjects
;
127 #if defined(OS_ANDROID)
128 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
132 RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
133 system_allocator_pool_name_
= WinHeapDumpProvider::kAllocatedObjects
;
137 void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate
* delegate
) {
138 AutoLock
lock(lock_
);
139 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate
*>(nullptr), delegate_
);
140 delegate_
= delegate
;
143 void MemoryDumpManager::RegisterDumpProvider(
144 MemoryDumpProvider
* mdp
,
145 const scoped_refptr
<SingleThreadTaskRunner
>& task_runner
) {
146 MemoryDumpProviderInfo
mdp_info(mdp
, task_runner
);
147 AutoLock
lock(lock_
);
148 dump_providers_
.insert(mdp_info
);
151 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider
* mdp
) {
152 RegisterDumpProvider(mdp
, nullptr);
155 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider
* mdp
) {
156 AutoLock
lock(lock_
);
158 auto mdp_iter
= dump_providers_
.begin();
159 for (; mdp_iter
!= dump_providers_
.end(); ++mdp_iter
) {
160 if (mdp_iter
->dump_provider
== mdp
)
164 if (mdp_iter
== dump_providers_
.end())
167 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
168 // only if the MDP has specified a thread affinity (via task_runner()) AND
169 // the unregistration happens on the same thread (so the MDP cannot unregister
170 // and OnMemoryDump() at the same time).
171 // Otherwise, it is not possible to guarantee that its unregistration is
172 // race-free. If you hit this DCHECK, your MDP has a bug.
174 subtle::NoBarrier_Load(&memory_tracing_enabled_
),
175 mdp_iter
->task_runner
&& mdp_iter
->task_runner
->BelongsToCurrentThread())
176 << "The MemoryDumpProvider attempted to unregister itself in a racy way. "
177 << "Please file a crbug.";
179 dump_providers_
.erase(mdp_iter
);
180 did_unregister_dump_provider_
= true;
183 void MemoryDumpManager::RequestGlobalDump(
184 MemoryDumpType dump_type
,
185 const MemoryDumpCallback
& callback
) {
186 // Bail out immediately if tracing is not enabled at all.
187 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_
))) {
188 if (!callback
.is_null())
189 callback
.Run(0u /* guid */, false /* success */);
194 TraceLog::GetInstance()->MangleEventId(g_next_guid
.GetNext());
196 // The delegate_ is supposed to be thread safe, immutable and long lived.
197 // No need to keep the lock after we ensure that a delegate has been set.
198 MemoryDumpManagerDelegate
* delegate
;
200 AutoLock
lock(lock_
);
201 delegate
= delegate_
;
205 // The delegate is in charge to coordinate the request among all the
206 // processes and call the CreateLocalDumpPoint on the local process.
207 MemoryDumpRequestArgs args
= {guid
, dump_type
};
208 delegate
->RequestGlobalMemoryDump(args
, callback
);
209 } else if (!callback
.is_null()) {
210 callback
.Run(guid
, false /* success */);
214 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type
) {
215 RequestGlobalDump(dump_type
, MemoryDumpCallback());
218 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs
& args
,
219 const MemoryDumpCallback
& callback
) {
220 scoped_ptr
<ProcessMemoryDumpAsyncState
> pmd_async_state
;
222 AutoLock
lock(lock_
);
223 did_unregister_dump_provider_
= false;
224 pmd_async_state
.reset(new ProcessMemoryDumpAsyncState(
225 args
, dump_providers_
.begin(), session_state_
, callback
));
228 // Start the thread hop. |dump_providers_| are kept sorted by thread, so
229 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
230 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
231 ContinueAsyncProcessDump(pmd_async_state
.Pass());
234 // At most one ContinueAsyncProcessDump() can be active at any time for a given
235 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to
236 // ensure consistency w.r.t. (un)registrations of |dump_providers_|.
237 // The linearization of dump providers' OnMemoryDump invocations is achieved by
238 // means of subsequent PostTask(s).
241 // - Check if the dump provider is disabled, if so skip the dump.
242 // - Check if we are on the right thread. If not hop and continue there.
243 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
245 // - Unregister the dump provider if it failed too many times consecutively.
246 // - Advance the |next_dump_provider| iterator to the next dump provider.
247 // - If this was the last hop, create a trace event, add it to the trace
248 // and finalize (invoke callback).
250 void MemoryDumpManager::ContinueAsyncProcessDump(
251 scoped_ptr
<ProcessMemoryDumpAsyncState
> pmd_async_state
) {
252 // Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
253 // in the PostTask below don't end up registering their own dump providers
254 // (for discounting trace memory overhead) while holding the |lock_|.
255 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
257 // DO NOT put any LOG() statement in the locked sections, as in some contexts
258 // (GPU process) LOG() ends up performing PostTask/IPCs.
259 MemoryDumpProvider
* mdp
;
260 bool skip_dump
= false;
262 AutoLock
lock(lock_
);
263 // In the unlikely event that a dump provider was unregistered while
264 // dumping, abort the dump, as that would make |next_dump_provider| invalid.
265 // Registration, on the other hand, is safe as per std::set<> contract.
266 if (did_unregister_dump_provider_
) {
267 return AbortDumpLocked(pmd_async_state
->callback
,
268 pmd_async_state
->task_runner
,
269 pmd_async_state
->req_args
.dump_guid
);
272 auto* mdp_info
= &*pmd_async_state
->next_dump_provider
;
273 mdp
= mdp_info
->dump_provider
;
274 if (mdp_info
->disabled
) {
276 } else if (mdp
== g_mmaps_dump_provider
&&
277 pmd_async_state
->req_args
.dump_type
!=
278 MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS
&&
279 pmd_async_state
->req_args
.dump_type
!=
280 MemoryDumpType::EXPLICITLY_TRIGGERED
) {
281 // Mmaps dumping is very heavyweight and cannot be performed at the same
282 // rate of other dumps. TODO(primiano): this is a hack and should be
283 // cleaned up as part of crbug.com/499731.
285 } else if (mdp_info
->task_runner
&&
286 !mdp_info
->task_runner
->BelongsToCurrentThread()) {
287 // It's time to hop onto another thread.
289 // Copy the callback + arguments just for the unlikley case in which
290 // PostTask fails. In such case the Bind helper will destroy the
291 // pmd_async_state and we must keep a copy of the fields to notify the
293 MemoryDumpCallback callback
= pmd_async_state
->callback
;
294 scoped_refptr
<SingleThreadTaskRunner
> callback_task_runner
=
295 pmd_async_state
->task_runner
;
296 const uint64 dump_guid
= pmd_async_state
->req_args
.dump_guid
;
298 const bool did_post_task
= mdp_info
->task_runner
->PostTask(
299 FROM_HERE
, Bind(&MemoryDumpManager::ContinueAsyncProcessDump
,
300 Unretained(this), Passed(pmd_async_state
.Pass())));
304 // The thread is gone. At this point the best thing we can do is to
305 // disable the dump provider and abort this dump.
306 mdp_info
->disabled
= true;
307 return AbortDumpLocked(callback
, callback_task_runner
, dump_guid
);
311 // Invoke the dump provider without holding the |lock_|.
312 bool finalize
= false;
313 bool dump_successful
= false;
315 // TODO(ssid): Change RequestGlobalDump to use MemoryDumpArgs along with
316 // MemoryDumpType to get request for light / heavy dump, and remove this
319 MemoryDumpArgs dump_args
= {MemoryDumpArgs::LEVEL_OF_DETAIL_HIGH
};
321 mdp
->OnMemoryDump(dump_args
, &pmd_async_state
->process_memory_dump
);
325 AutoLock
lock(lock_
);
326 if (did_unregister_dump_provider_
) {
327 return AbortDumpLocked(pmd_async_state
->callback
,
328 pmd_async_state
->task_runner
,
329 pmd_async_state
->req_args
.dump_guid
);
331 auto* mdp_info
= &*pmd_async_state
->next_dump_provider
;
332 if (dump_successful
) {
333 mdp_info
->consecutive_failures
= 0;
334 } else if (!skip_dump
) {
335 ++mdp_info
->consecutive_failures
;
336 if (mdp_info
->consecutive_failures
>= kMaxConsecutiveFailuresCount
) {
337 mdp_info
->disabled
= true;
340 ++pmd_async_state
->next_dump_provider
;
341 finalize
= pmd_async_state
->next_dump_provider
== dump_providers_
.end();
344 if (!skip_dump
&& !dump_successful
) {
345 LOG(ERROR
) << "A memory dumper failed, possibly due to sandboxing "
346 "(crbug.com/461788). Disabling dumper for current process. "
347 "Try restarting chrome with the --no-sandbox switch.";
351 return FinalizeDumpAndAddToTrace(pmd_async_state
.Pass());
353 ContinueAsyncProcessDump(pmd_async_state
.Pass());
357 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
358 scoped_ptr
<ProcessMemoryDumpAsyncState
> pmd_async_state
) {
359 if (!pmd_async_state
->task_runner
->BelongsToCurrentThread()) {
360 scoped_refptr
<SingleThreadTaskRunner
> task_runner
=
361 pmd_async_state
->task_runner
;
362 task_runner
->PostTask(FROM_HERE
,
363 Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace
,
364 Passed(pmd_async_state
.Pass())));
368 scoped_refptr
<ConvertableToTraceFormat
> event_value(new TracedValue());
369 pmd_async_state
->process_memory_dump
.AsValueInto(
370 static_cast<TracedValue
*>(event_value
.get()));
371 const char* const event_name
=
372 MemoryDumpTypeToString(pmd_async_state
->req_args
.dump_type
);
374 TRACE_EVENT_API_ADD_TRACE_EVENT(
375 TRACE_EVENT_PHASE_MEMORY_DUMP
,
376 TraceLog::GetCategoryGroupEnabled(kTraceCategory
), event_name
,
377 pmd_async_state
->req_args
.dump_guid
, kTraceEventNumArgs
,
378 kTraceEventArgNames
, kTraceEventArgTypes
, nullptr /* arg_values */,
379 &event_value
, TRACE_EVENT_FLAG_HAS_ID
);
381 if (!pmd_async_state
->callback
.is_null()) {
382 pmd_async_state
->callback
.Run(pmd_async_state
->req_args
.dump_guid
,
384 pmd_async_state
->callback
.Reset();
389 void MemoryDumpManager::AbortDumpLocked(
390 MemoryDumpCallback callback
,
391 scoped_refptr
<SingleThreadTaskRunner
> task_runner
,
393 if (callback
.is_null())
394 return; // There is nothing to NACK.
396 // Post the callback even if we are already on the right thread to avoid
397 // invoking the callback while holding the lock_.
398 task_runner
->PostTask(FROM_HERE
,
399 Bind(callback
, dump_guid
, false /* success */));
402 void MemoryDumpManager::OnTraceLogEnabled() {
403 // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter
404 // to figure out (and cache) which dumpers should be enabled or not.
405 // For the moment piggy back everything on the generic "memory" category.
407 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory
, &enabled
);
409 // Initialize the TraceLog for the current thread. This is to avoid that the
410 // TraceLog memory dump provider is registered lazily in the PostTask() below
411 // while the |lock_| is taken;
412 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
414 AutoLock
lock(lock_
);
416 // There is no point starting the tracing without a delegate.
417 if (!enabled
|| !delegate_
) {
418 // Disable all the providers.
419 for (auto it
= dump_providers_
.begin(); it
!= dump_providers_
.end(); ++it
)
424 session_state_
= new MemoryDumpSessionState();
425 for (auto it
= dump_providers_
.begin(); it
!= dump_providers_
.end(); ++it
) {
426 it
->disabled
= false;
427 it
->consecutive_failures
= 0;
430 subtle::NoBarrier_Store(&memory_tracing_enabled_
, 1);
432 // TODO(primiano): This is a temporary hack to disable periodic memory dumps
433 // when running memory benchmarks until they can be enabled/disabled in
434 // base::trace_event::TraceConfig. See https://goo.gl/5Hj3o0.
435 if (delegate_
->IsCoordinatorProcess() &&
436 !CommandLine::ForCurrentProcess()->HasSwitch(
437 "enable-memory-benchmarking")) {
438 g_periodic_dumps_count
= 0;
439 periodic_dump_timer_
.Start(FROM_HERE
,
440 TimeDelta::FromMilliseconds(kDumpIntervalMs
),
441 base::Bind(&RequestPeriodicGlobalDump
));
445 void MemoryDumpManager::OnTraceLogDisabled() {
446 AutoLock
lock(lock_
);
447 periodic_dump_timer_
.Stop();
448 subtle::NoBarrier_Store(&memory_tracing_enabled_
, 0);
449 session_state_
= nullptr;
452 uint64
MemoryDumpManager::GetTracingProcessId() const {
453 return delegate_
->GetTracingProcessId();
456 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
457 MemoryDumpProvider
* dump_provider
,
458 const scoped_refptr
<SingleThreadTaskRunner
>& task_runner
)
459 : dump_provider(dump_provider
),
460 task_runner(task_runner
),
461 consecutive_failures(0),
465 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {
468 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<(
469 const MemoryDumpProviderInfo
& other
) const {
470 if (task_runner
== other
.task_runner
)
471 return dump_provider
< other
.dump_provider
;
472 return task_runner
< other
.task_runner
;
475 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
476 MemoryDumpRequestArgs req_args
,
477 MemoryDumpProviderInfoSet::iterator next_dump_provider
,
478 const scoped_refptr
<MemoryDumpSessionState
>& session_state
,
479 MemoryDumpCallback callback
)
480 : process_memory_dump(session_state
),
482 next_dump_provider(next_dump_provider
),
484 task_runner(MessageLoop::current()->task_runner()) {
487 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
490 } // namespace trace_event