// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <algorithm>

#include "base/atomic_sequence_num.h"
#include "base/compiler_specific.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event_argument.h"

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/process_memory_maps_dump_provider.h"
#include "base/trace_event/process_memory_totals_dump_provider.h"
#elif defined(OS_WIN)
#include "base/trace_event/winheap_dump_provider_win.h"
#endif

namespace base {
namespace trace_event {

namespace {

// TODO(primiano): this should be smarter and should do something similar to
// trace event synthetic delays.
const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra");

MemoryDumpManager* g_instance_for_testing = nullptr;
const int kDumpIntervalSeconds = 2;
const int kTraceEventNumArgs = 1;
const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
StaticAtomicSequenceNumber g_next_guid;

const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
  switch (dump_type) {
    case MemoryDumpType::TASK_BEGIN:
      return "TASK_BEGIN";
    case MemoryDumpType::TASK_END:
      return "TASK_END";
    case MemoryDumpType::PERIODIC_INTERVAL:
      return "PERIODIC_INTERVAL";
    case MemoryDumpType::EXPLICITLY_TRIGGERED:
      return "EXPLICITLY_TRIGGERED";
  }
  NOTREACHED();
  return "UNKNOWN";
}

// Internal class used to hold details about ProcessMemoryDump requests for the
// current process.
// TODO(primiano): In the upcoming CLs, ProcessMemoryDump will become async and
// this class will be used to convey more details across PostTask()s.
class ProcessMemoryDumpHolder
    : public RefCountedThreadSafe<ProcessMemoryDumpHolder> {
 public:
  ProcessMemoryDumpHolder(
      MemoryDumpRequestArgs req_args,
      const scoped_refptr<MemoryDumpSessionState>& session_state,
      MemoryDumpCallback callback)
      : process_memory_dump(session_state),
        req_args(req_args),
        callback(callback),
        task_runner(MessageLoop::current()->task_runner()),
        num_pending_async_requests(0) {}

  ProcessMemoryDump process_memory_dump;
  const MemoryDumpRequestArgs req_args;

  // Callback passed to the initial call to CreateProcessDump().
  MemoryDumpCallback callback;

  // Thread on which FinalizeDumpAndAddToTrace() should be called, which is the
  // same that invoked the initial CreateProcessDump().
  const scoped_refptr<SingleThreadTaskRunner> task_runner;

  // Number of pending ContinueAsyncProcessDump() calls.
  int num_pending_async_requests;

 private:
  friend class RefCountedThreadSafe<ProcessMemoryDumpHolder>;
  virtual ~ProcessMemoryDumpHolder() {}
  DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpHolder);
};

void FinalizeDumpAndAddToTrace(
    const scoped_refptr<ProcessMemoryDumpHolder>& pmd_holder) {
  DCHECK_EQ(0, pmd_holder->num_pending_async_requests);

  if (!pmd_holder->task_runner->BelongsToCurrentThread()) {
    pmd_holder->task_runner->PostTask(
        FROM_HERE, Bind(&FinalizeDumpAndAddToTrace, pmd_holder));
    return;
  }

  scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue());
  pmd_holder->process_memory_dump.AsValueInto(
      static_cast<TracedValue*>(event_value.get()));
  const char* const event_name =
      MemoryDumpTypeToString(pmd_holder->req_args.dump_type);

  TRACE_EVENT_API_ADD_TRACE_EVENT(
      TRACE_EVENT_PHASE_MEMORY_DUMP,
      TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
      pmd_holder->req_args.dump_guid, kTraceEventNumArgs, kTraceEventArgNames,
      kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
      TRACE_EVENT_FLAG_HAS_ID);

  if (!pmd_holder->callback.is_null()) {
    pmd_holder->callback.Run(pmd_holder->req_args.dump_guid, true);
    pmd_holder->callback.Reset();
  }
}

void RequestPeriodicGlobalDump() {
  MemoryDumpManager::GetInstance()->RequestGlobalDump(
      MemoryDumpType::PERIODIC_INTERVAL);
}

}  // namespace

// static
const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory;

// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_instance_for_testing)
    return g_instance_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

// static
void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
  if (instance)
    instance->skip_core_dumpers_auto_registration_for_testing_ = true;
  g_instance_for_testing = instance;
}

MemoryDumpManager::MemoryDumpManager()
    : delegate_(nullptr),
      memory_tracing_enabled_(0),
      skip_core_dumpers_auto_registration_for_testing_(false) {
  g_next_guid.GetNext();  // Make sure that the first guid is not zero.
}

MemoryDumpManager::~MemoryDumpManager() {
  base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}

void MemoryDumpManager::Initialize() {
  TRACE_EVENT0(kTraceCategory, "init");  // Add to trace-viewer category list.
  trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);

  if (skip_core_dumpers_auto_registration_for_testing_)
    return;

#if defined(OS_LINUX) || defined(OS_ANDROID)
  // Enable the core dump providers.
  RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
  RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance());
  RegisterDumpProvider(MallocDumpProvider::GetInstance());
#elif defined(OS_WIN)
  RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
#endif
}

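// Illustrative sketch (not part of the original file): an embedder would
// typically wire the manager up once per process and install a delegate so
// that global dumps can be coordinated across processes. |my_delegate| below
// is a hypothetical MemoryDumpManagerDelegate instance.
//
//   MemoryDumpManager::GetInstance()->Initialize();
//   MemoryDumpManager::GetInstance()->SetDelegate(my_delegate);
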
void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) {
  AutoLock lock(lock_);
  DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_);
  delegate_ = delegate;
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
  MemoryDumpProviderInfo mdp_info(task_runner);
  AutoLock lock(lock_);
  dump_providers_.insert(std::make_pair(mdp, mdp_info));
}

void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) {
  RegisterDumpProvider(mdp, nullptr);
}

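// Illustrative sketch (not part of the original file): a hypothetical client
// implementing MemoryDumpProvider and registering itself. Passing a
// SingleThreadTaskRunner makes the manager post OnMemoryDump() to that thread;
// omitting it means the call happens synchronously inside CreateProcessDump().
//
//   class MyDumpProvider : public MemoryDumpProvider {
//    public:
//     bool OnMemoryDump(ProcessMemoryDump* pmd) override {
//       // Fill |pmd| with this client's memory stats. Returning false makes
//       // the manager disable this provider (see InvokeDumpProviderLocked()).
//       return true;
//     }
//   };
//
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//       my_provider, my_task_runner);  // Both variables are hypothetical.
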
void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  AutoLock lock(lock_);

  auto it = dump_providers_.find(mdp);
  if (it == dump_providers_.end())
    return;

  const MemoryDumpProviderInfo& mdp_info = it->second;
  // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
  // only if the MDP has specified a thread affinity (via task_runner()) AND
  // the unregistration happens on that same thread (so the MDP cannot
  // unregister and run OnMemoryDump() at the same time).
  // Otherwise, it is not possible to guarantee that its unregistration is
  // race-free. If you hit this DCHECK, your MDP has a bug.
  DCHECK_IMPLIES(
      subtle::NoBarrier_Load(&memory_tracing_enabled_),
      mdp_info.task_runner && mdp_info.task_runner->BelongsToCurrentThread())
      << "The MemoryDumpProvider attempted to unregister itself in a racy way. "
      << "Please file a crbug.";

  // Remove from the enabled providers list. This is to deal with the case that
  // UnregisterDumpProvider is called while the trace is enabled.
  dump_providers_.erase(it);
}

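// Illustrative sketch (not part of the original file): per the DCHECK above, a
// provider registered with a task runner should unregister from that same
// thread while tracing may be active, e.g. (|my_task_runner| and |my_provider|
// are hypothetical):
//
//   my_task_runner->PostTask(
//       FROM_HERE,
//       Bind(&MemoryDumpManager::UnregisterDumpProvider,
//            Unretained(MemoryDumpManager::GetInstance()),
//            Unretained(my_provider)));
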
void MemoryDumpManager::RequestGlobalDump(
    MemoryDumpType dump_type,
    const MemoryDumpCallback& callback) {
  // Bail out immediately if tracing is not enabled at all.
  if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)))
    return;

  const uint64 guid =
      TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());

  // The delegate_ is supposed to be thread safe, immutable and long lived.
  // No need to keep the lock after we ensure that a delegate has been set.
  MemoryDumpManagerDelegate* delegate;
  {
    AutoLock lock(lock_);
    delegate = delegate_;
  }

  if (delegate) {
    // The delegate is in charge of coordinating the request among all the
    // processes and invoking CreateProcessDump() on the local process.
    MemoryDumpRequestArgs args = {guid, dump_type};
    delegate->RequestGlobalMemoryDump(args, callback);
  } else if (!callback.is_null()) {
    callback.Run(guid, false /* success */);
  }
}

void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) {
  RequestGlobalDump(dump_type, MemoryDumpCallback());
}

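// Illustrative sketch (not part of the original file): any thread may request
// a dump of all processes once the memory-infra tracing category is enabled.
// The callback receives the dump guid and a success flag; |OnDumpCompleted| is
// hypothetical.
//
//   void OnDumpCompleted(uint64 dump_guid, bool success) { /* ... */ }
//
//   MemoryDumpManager::GetInstance()->RequestGlobalDump(
//       MemoryDumpType::EXPLICITLY_TRIGGERED, Bind(&OnDumpCompleted));
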
// Creates a memory dump for the current process and appends it to the trace.
void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          const MemoryDumpCallback& callback) {
  scoped_refptr<ProcessMemoryDumpHolder> pmd_holder(
      new ProcessMemoryDumpHolder(args, session_state_, callback));
  ProcessMemoryDump* pmd = &pmd_holder->process_memory_dump;
  bool did_any_provider_dump = false;

  // Iterate over the active dump providers and invoke OnMemoryDump(pmd).
  // The MDM guarantees linearity (at most one MDP is active within one
  // process) and thread-safety (MDM enforces the right locking when entering /
  // leaving the MDP.OnMemoryDump() call). This is to simplify the clients'
  // design and not let the MDPs worry about locking.
  // As regards thread affinity, depending on the MDP configuration (see
  // memory_dump_provider.h), the OnMemoryDump() invocation can happen:
  //  - Synchronously on the MDM thread, when MDP.task_runner() is not set.
  //  - Posted on MDP.task_runner(), when MDP.task_runner() is set.
  {
    AutoLock lock(lock_);
    for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) {
      MemoryDumpProvider* mdp = it->first;
      MemoryDumpProviderInfo* mdp_info = &it->second;
      if (mdp_info->disabled)
        continue;
      if (mdp_info->task_runner) {
        // The OnMemoryDump() call must be posted.
        bool did_post_async_task = mdp_info->task_runner->PostTask(
            FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
                            Unretained(this), Unretained(mdp), pmd_holder));
        // The thread underlying the TaskRunner might have gone away.
        if (did_post_async_task)
          ++pmd_holder->num_pending_async_requests;
      } else {
        // Invoke the dump provider synchronously.
        did_any_provider_dump |= InvokeDumpProviderLocked(mdp, pmd);
      }
    }
  }  // AutoLock

  // If at least one synchronous provider did dump and there are no pending
  // asynchronous requests, add the dump to the trace and invoke the callback
  // straight away (FinalizeDumpAndAddToTrace() takes care of the callback).
  if (did_any_provider_dump && pmd_holder->num_pending_async_requests == 0)
    FinalizeDumpAndAddToTrace(pmd_holder);
}

// Invokes MemoryDumpProvider.OnMemoryDump(), taking care of the fail-safe
// logic which disables the dumper when failing (crbug.com/461788).
bool MemoryDumpManager::InvokeDumpProviderLocked(MemoryDumpProvider* mdp,
                                                 ProcessMemoryDump* pmd) {
  lock_.AssertAcquired();
  bool dump_successful = mdp->OnMemoryDump(pmd);
  if (!dump_successful) {
    LOG(ERROR) << "The memory dumper failed, possibly due to sandboxing "
                  "(crbug.com/461788), disabling it for the current process. "
                  "Try restarting chrome with the --no-sandbox switch.";
    dump_providers_.find(mdp)->second.disabled = true;
  }
  return dump_successful;
}

// This is posted to arbitrary threads as a continuation of CreateProcessDump()
// when one or more MemoryDumpProvider(s) require the OnMemoryDump() call to
// happen on a different thread.
void MemoryDumpManager::ContinueAsyncProcessDump(
    MemoryDumpProvider* mdp,
    scoped_refptr<ProcessMemoryDumpHolder> pmd_holder) {
  bool should_finalize_dump = false;
  {
    // The lock here is to guarantee that different asynchronous dumps on
    // different threads are still serialized, so that the MemoryDumpProvider
    // has a consistent view of the |pmd| argument passed.
    AutoLock lock(lock_);
    ProcessMemoryDump* pmd = &pmd_holder->process_memory_dump;

    // Check if the MemoryDumpProvider is still there. It might have been
    // destroyed and unregistered while hopping threads.
    if (dump_providers_.count(mdp))
      InvokeDumpProviderLocked(mdp, pmd);

    // Finalize the dump, appending it to the trace, if this was the last
    // pending asynchronous request.
    --pmd_holder->num_pending_async_requests;
    if (pmd_holder->num_pending_async_requests == 0)
      should_finalize_dump = true;
  }  // AutoLock(lock_)

  if (should_finalize_dump)
    FinalizeDumpAndAddToTrace(pmd_holder);
}

void MemoryDumpManager::OnTraceLogEnabled() {
  // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter
  // to figure out (and cache) which dumpers should be enabled or not.
  // For the moment piggy back everything on the generic "memory" category.
  bool enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);

  AutoLock lock(lock_);

  // There is no point starting the tracing without a delegate.
  if (!enabled || !delegate_) {
    // Disable all the providers.
    for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it)
      it->second.disabled = true;
    return;
  }

  session_state_ = new MemoryDumpSessionState();
  for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it)
    it->second.disabled = false;

  subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);

  if (delegate_->IsCoordinatorProcess()) {
    periodic_dump_timer_.Start(FROM_HERE,
                               TimeDelta::FromSeconds(kDumpIntervalSeconds),
                               base::Bind(&RequestPeriodicGlobalDump));
  }
}

void MemoryDumpManager::OnTraceLogDisabled() {
  AutoLock lock(lock_);
  periodic_dump_timer_.Stop();
  subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
  session_state_ = nullptr;
}

MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
    const scoped_refptr<SingleThreadTaskRunner>& task_runner)
    : task_runner(task_runner), disabled(false) {
}

MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {
}

}  // namespace trace_event
}  // namespace base