1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include "base/trace_event/memory_dump_manager.h"

#include "base/atomic_sequence_num.h"
#include "base/compiler_specific.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if !defined(OS_NACL)
#include "base/trace_event/process_memory_totals_dump_provider.h"
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/process_memory_maps_dump_provider.h"
#endif

#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif

#if defined(OS_WIN)
#include "base/trace_event/winheap_dump_provider_win.h"
#endif
35 namespace trace_event
{
39 // TODO(primiano): this should be smarter and should do something similar to
40 // trace event synthetic delays.
41 const char kTraceCategory
[] = TRACE_DISABLED_BY_DEFAULT("memory-infra");
43 MemoryDumpManager
* g_instance_for_testing
= nullptr;
44 const int kDumpIntervalSeconds
= 2;
45 const int kTraceEventNumArgs
= 1;
46 const char* kTraceEventArgNames
[] = {"dumps"};
47 const unsigned char kTraceEventArgTypes
[] = {TRACE_VALUE_TYPE_CONVERTABLE
};
48 StaticAtomicSequenceNumber g_next_guid
;
50 const char* MemoryDumpTypeToString(const MemoryDumpType
& dump_type
) {
52 case MemoryDumpType::TASK_BEGIN
:
54 case MemoryDumpType::TASK_END
:
56 case MemoryDumpType::PERIODIC_INTERVAL
:
57 return "PERIODIC_INTERVAL";
58 case MemoryDumpType::EXPLICITLY_TRIGGERED
:
59 return "EXPLICITLY_TRIGGERED";
65 // Internal class used to hold details about ProcessMemoryDump requests for the
67 // TODO(primiano): In the upcoming CLs, ProcessMemoryDump will become async.
68 // and this class will be used to convey more details across PostTask()s.
69 class ProcessMemoryDumpHolder
70 : public RefCountedThreadSafe
<ProcessMemoryDumpHolder
> {
72 ProcessMemoryDumpHolder(
73 MemoryDumpRequestArgs req_args
,
74 const scoped_refptr
<MemoryDumpSessionState
>& session_state
,
75 MemoryDumpCallback callback
)
76 : process_memory_dump(session_state
),
79 task_runner(MessageLoop::current()->task_runner()),
80 num_pending_async_requests(0) {}
82 ProcessMemoryDump process_memory_dump
;
83 const MemoryDumpRequestArgs req_args
;
85 // Callback passed to the initial call to CreateProcessDump().
86 MemoryDumpCallback callback
;
88 // Thread on which FinalizeDumpAndAddToTrace() should be called, which is the
89 // same that invoked the initial CreateProcessDump().
90 const scoped_refptr
<SingleThreadTaskRunner
> task_runner
;
92 // Number of pending ContinueAsyncProcessDump() calls.
93 int num_pending_async_requests
;
96 friend class RefCountedThreadSafe
<ProcessMemoryDumpHolder
>;
97 virtual ~ProcessMemoryDumpHolder() {}
98 DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpHolder
);
101 void FinalizeDumpAndAddToTrace(
102 const scoped_refptr
<ProcessMemoryDumpHolder
>& pmd_holder
) {
103 DCHECK_EQ(0, pmd_holder
->num_pending_async_requests
);
105 if (!pmd_holder
->task_runner
->BelongsToCurrentThread()) {
106 pmd_holder
->task_runner
->PostTask(
107 FROM_HERE
, Bind(&FinalizeDumpAndAddToTrace
, pmd_holder
));
111 scoped_refptr
<ConvertableToTraceFormat
> event_value(new TracedValue());
112 pmd_holder
->process_memory_dump
.AsValueInto(
113 static_cast<TracedValue
*>(event_value
.get()));
114 const char* const event_name
=
115 MemoryDumpTypeToString(pmd_holder
->req_args
.dump_type
);
117 TRACE_EVENT_API_ADD_TRACE_EVENT(
118 TRACE_EVENT_PHASE_MEMORY_DUMP
,
119 TraceLog::GetCategoryGroupEnabled(kTraceCategory
), event_name
,
120 pmd_holder
->req_args
.dump_guid
, kTraceEventNumArgs
, kTraceEventArgNames
,
121 kTraceEventArgTypes
, nullptr /* arg_values */, &event_value
,
122 TRACE_EVENT_FLAG_HAS_ID
);
124 if (!pmd_holder
->callback
.is_null()) {
125 pmd_holder
->callback
.Run(pmd_holder
->req_args
.dump_guid
, true);
126 pmd_holder
->callback
.Reset();
130 void RequestPeriodicGlobalDump() {
131 MemoryDumpManager::GetInstance()->RequestGlobalDump(
132 MemoryDumpType::PERIODIC_INTERVAL
);
138 const char* const MemoryDumpManager::kTraceCategoryForTesting
= kTraceCategory
;
141 MemoryDumpManager
* MemoryDumpManager::GetInstance() {
142 if (g_instance_for_testing
)
143 return g_instance_for_testing
;
145 return Singleton
<MemoryDumpManager
,
146 LeakySingletonTraits
<MemoryDumpManager
>>::get();
150 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager
* instance
) {
152 instance
->skip_core_dumpers_auto_registration_for_testing_
= true;
153 g_instance_for_testing
= instance
;
156 MemoryDumpManager::MemoryDumpManager()
157 : delegate_(nullptr),
158 memory_tracing_enabled_(0),
159 skip_core_dumpers_auto_registration_for_testing_(false) {
160 g_next_guid
.GetNext(); // Make sure that first guid is not zero.
163 MemoryDumpManager::~MemoryDumpManager() {
164 base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
167 void MemoryDumpManager::Initialize() {
168 TRACE_EVENT0(kTraceCategory
, "init"); // Add to trace-viewer category list.
169 trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);
171 if (skip_core_dumpers_auto_registration_for_testing_
)
174 // Enable the core dump providers.
175 #if !defined(OS_NACL)
176 RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
179 #if defined(OS_LINUX) || defined(OS_ANDROID)
180 RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance());
181 RegisterDumpProvider(MallocDumpProvider::GetInstance());
184 #if defined(OS_ANDROID)
185 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
189 RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
193 void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate
* delegate
) {
194 AutoLock
lock(lock_
);
195 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate
*>(nullptr), delegate_
);
196 delegate_
= delegate
;
199 void MemoryDumpManager::RegisterDumpProvider(
200 MemoryDumpProvider
* mdp
,
201 const scoped_refptr
<SingleThreadTaskRunner
>& task_runner
) {
202 MemoryDumpProviderInfo
mdp_info(task_runner
);
203 AutoLock
lock(lock_
);
204 dump_providers_
.insert(std::make_pair(mdp
, mdp_info
));
207 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider
* mdp
) {
208 RegisterDumpProvider(mdp
, nullptr);
211 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider
* mdp
) {
212 AutoLock
lock(lock_
);
214 auto it
= dump_providers_
.find(mdp
);
215 if (it
== dump_providers_
.end())
218 const MemoryDumpProviderInfo
& mdp_info
= it
->second
;
219 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
220 // only if the MDP has specified a thread affinity (via task_runner()) AND
221 // the unregistration happens on the same thread (so the MDP cannot unregister
222 // and OnMemoryDump() at the same time).
223 // Otherwise, it is not possible to guarantee that its unregistration is
224 // race-free. If you hit this DCHECK, your MDP has a bug.
226 subtle::NoBarrier_Load(&memory_tracing_enabled_
),
227 mdp_info
.task_runner
&& mdp_info
.task_runner
->BelongsToCurrentThread())
228 << "The MemoryDumpProvider attempted to unregister itself in a racy way. "
229 << " Please file a crbug.";
231 // Remove from the enabled providers list. This is to deal with the case that
232 // UnregisterDumpProvider is called while the trace is enabled.
233 dump_providers_
.erase(it
);
236 void MemoryDumpManager::RequestGlobalDump(
237 MemoryDumpType dump_type
,
238 const MemoryDumpCallback
& callback
) {
239 // Bail out immediately if tracing is not enabled at all.
240 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_
)))
244 TraceLog::GetInstance()->MangleEventId(g_next_guid
.GetNext());
246 // The delegate_ is supposed to be thread safe, immutable and long lived.
247 // No need to keep the lock after we ensure that a delegate has been set.
248 MemoryDumpManagerDelegate
* delegate
;
250 AutoLock
lock(lock_
);
251 delegate
= delegate_
;
255 // The delegate is in charge to coordinate the request among all the
256 // processes and call the CreateLocalDumpPoint on the local process.
257 MemoryDumpRequestArgs args
= {guid
, dump_type
};
258 delegate
->RequestGlobalMemoryDump(args
, callback
);
259 } else if (!callback
.is_null()) {
260 callback
.Run(guid
, false /* success */);
264 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type
) {
265 RequestGlobalDump(dump_type
, MemoryDumpCallback());
268 // Creates a memory dump for the current process and appends it to the trace.
269 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs
& args
,
270 const MemoryDumpCallback
& callback
) {
271 scoped_refptr
<ProcessMemoryDumpHolder
> pmd_holder(
272 new ProcessMemoryDumpHolder(args
, session_state_
, callback
));
273 ProcessMemoryDump
* pmd
= &pmd_holder
->process_memory_dump
;
274 bool did_any_provider_dump
= false;
276 // Iterate over the active dump providers and invoke OnMemoryDump(pmd).
277 // The MDM guarantees linearity (at most one MDP is active within one
278 // process) and thread-safety (MDM enforces the right locking when entering /
279 // leaving the MDP.OnMemoryDump() call). This is to simplify the clients'
281 // and not let the MDPs worry about locking.
282 // As regards thread affinity, depending on the MDP configuration (see
283 // memory_dump_provider.h), the OnMemoryDump() invocation can happen:
284 // - Synchronousy on the MDM thread, when MDP.task_runner() is not set.
285 // - Posted on MDP.task_runner(), when MDP.task_runner() is set.
287 AutoLock
lock(lock_
);
288 for (auto it
= dump_providers_
.begin(); it
!= dump_providers_
.end(); ++it
) {
289 MemoryDumpProvider
* mdp
= it
->first
;
290 MemoryDumpProviderInfo
* mdp_info
= &it
->second
;
291 if (mdp_info
->disabled
)
293 if (mdp_info
->task_runner
) {
294 // The OnMemoryDump() call must be posted.
295 bool did_post_async_task
= mdp_info
->task_runner
->PostTask(
296 FROM_HERE
, Bind(&MemoryDumpManager::ContinueAsyncProcessDump
,
297 Unretained(this), Unretained(mdp
), pmd_holder
));
298 // The thread underlying the TaskRunner might have gone away.
299 if (did_post_async_task
)
300 ++pmd_holder
->num_pending_async_requests
;
302 // Invoke the dump provider synchronously.
303 did_any_provider_dump
|= InvokeDumpProviderLocked(mdp
, pmd
);
308 // If at least one synchronous provider did dump and there are no pending
309 // asynchronous requests, add the dump to the trace and invoke the callback
310 // straight away (FinalizeDumpAndAddToTrace() takes care of the callback).
311 if (did_any_provider_dump
&& pmd_holder
->num_pending_async_requests
== 0)
312 FinalizeDumpAndAddToTrace(pmd_holder
);
315 // Invokes the MemoryDumpProvider.OnMemoryDump(), taking care of the fail-safe
316 // logic which disables the dumper when failing (crbug.com/461788).
317 bool MemoryDumpManager::InvokeDumpProviderLocked(MemoryDumpProvider
* mdp
,
318 ProcessMemoryDump
* pmd
) {
319 lock_
.AssertAcquired();
320 bool dump_successful
= mdp
->OnMemoryDump(pmd
);
321 if (!dump_successful
) {
322 LOG(ERROR
) << "The memory dumper failed, possibly due to sandboxing "
323 "(crbug.com/461788), disabling it for current process. Try "
324 "restarting chrome with the --no-sandbox switch.";
325 dump_providers_
.find(mdp
)->second
.disabled
= true;
327 return dump_successful
;
330 // This is posted to arbitrary threads as a continuation of CreateProcessDump(),
331 // when one or more MemoryDumpProvider(s) require the OnMemoryDump() call to
332 // happen on a different thread.
333 void MemoryDumpManager::ContinueAsyncProcessDump(
334 MemoryDumpProvider
* mdp
,
335 scoped_refptr
<ProcessMemoryDumpHolder
> pmd_holder
) {
336 bool should_finalize_dump
= false;
338 // The lock here is to guarantee that different asynchronous dumps on
339 // different threads are still serialized, so that the MemoryDumpProvider
340 // has a consistent view of the |pmd| argument passed.
341 AutoLock
lock(lock_
);
342 ProcessMemoryDump
* pmd
= &pmd_holder
->process_memory_dump
;
344 // Check if the MemoryDumpProvider is still there. It might have been
345 // destroyed and unregistered while hopping threads.
346 if (dump_providers_
.count(mdp
))
347 InvokeDumpProviderLocked(mdp
, pmd
);
349 // Finalize the dump appending it to the trace if this was the last
350 // asynchronous request pending.
351 --pmd_holder
->num_pending_async_requests
;
352 if (pmd_holder
->num_pending_async_requests
== 0)
353 should_finalize_dump
= true;
356 if (should_finalize_dump
)
357 FinalizeDumpAndAddToTrace(pmd_holder
);
360 void MemoryDumpManager::OnTraceLogEnabled() {
361 // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter
362 // to figure out (and cache) which dumpers should be enabled or not.
363 // For the moment piggy back everything on the generic "memory" category.
365 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory
, &enabled
);
367 AutoLock
lock(lock_
);
369 // There is no point starting the tracing without a delegate.
370 if (!enabled
|| !delegate_
) {
371 // Disable all the providers.
372 for (auto it
= dump_providers_
.begin(); it
!= dump_providers_
.end(); ++it
)
373 it
->second
.disabled
= true;
377 session_state_
= new MemoryDumpSessionState();
378 for (auto it
= dump_providers_
.begin(); it
!= dump_providers_
.end(); ++it
)
379 it
->second
.disabled
= false;
381 subtle::NoBarrier_Store(&memory_tracing_enabled_
, 1);
383 if (delegate_
->IsCoordinatorProcess()) {
384 periodic_dump_timer_
.Start(FROM_HERE
,
385 TimeDelta::FromSeconds(kDumpIntervalSeconds
),
386 base::Bind(&RequestPeriodicGlobalDump
));
390 void MemoryDumpManager::OnTraceLogDisabled() {
391 AutoLock
lock(lock_
);
392 periodic_dump_timer_
.Stop();
393 subtle::NoBarrier_Store(&memory_tracing_enabled_
, 0);
394 session_state_
= nullptr;
397 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
398 const scoped_refptr
<SingleThreadTaskRunner
>& task_runner
)
399 : task_runner(task_runner
), disabled(false) {
401 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {
404 } // namespace trace_event