// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_log.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/memory/singleton.h"
#include "base/process/process_metrics.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "base/trace_event/trace_sampling_thread.h"
#if defined(OS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#include "base/trace_event/trace_event_win.h"
#endif
class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::trace_event::TraceLog,
              LeakySingletonTraits<base::trace_event::TraceLog>>::OnExit(0);
  }
};
// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
namespace trace_event {
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;
// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;
// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";

#define MAX_CATEGORY_GROUPS 100
// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
    "toplevel",
    "tracing already shutdown",
    "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
    "__metadata",
    // For reporting trace_event overhead. For thread local event buffers only.
    "trace_event_overhead"};
// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
// Skip default categories.
base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
    LAZY_INSTANCE_INITIALIZER;
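
// Returns the current thread's CPU time when the platform supports it,
// otherwise a null ThreadTicks.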
ThreadTicks ThreadNow() {
  return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
}
template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name,
                             const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(
      thread_id,
      TraceTicks(),
      ThreadTicks(),
      TRACE_EVENT_PHASE_METADATA,
      &g_category_group_enabled[g_category_metadata],
      metadata_name,
      trace_event_internal::kNoId,  // id
      trace_event_internal::kNoId,  // context_id
      trace_event_internal::kNoId,  // bind_id
      num_args,
      &arg_name,
      &arg_type,
      &arg_value,
      nullptr,
      TRACE_EVENT_FLAG_NONE);
}
class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};
// Use this function instead of TraceEventHandle constructor to keep the
// overhead of ScopedTracer (trace_event.h) constructor minimum.
void MakeHandle(uint32 chunk_seq,
                size_t chunk_index,
                size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_seq);
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
// A helper class that allows the lock to be acquired in the middle of the
// scope and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}

  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    }
  }

 private:
  Lock* lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};
class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver,
      public MemoryDumpProvider {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TraceTicks& event_timestamp,
                      const ThreadTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(ProcessMemoryDump* pmd) override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int event_count_;
  TimeDelta overhead_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};
TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      event_count_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  // This is to report the local memory usage when memory-infra is enabled.
  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, ThreadTaskRunnerHandle::Get());

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}
TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);
  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

  // Zero event_count_ happens in either of the following cases:
  // - no event generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);
  }

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}
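
// Returns a slot for a new event in this thread's current chunk, taking a
// fresh chunk from the main buffer (under the TraceLog lock) when the current
// chunk is missing or full.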
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}
void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TraceTicks& event_timestamp,
    const ThreadTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  CheckThisIsCurrentBuffer();

  event_count_++;
  ThreadTicks thread_now = ThreadNow();
  TraceTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
    if (trace_event) {
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()),
          event_timestamp,
          event_thread_timestamp,
          TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead",
          trace_event_internal::kNoId,  // id
          trace_event_internal::kNoId,  // context_id
          trace_event_internal::kNoId,  // bind_id
          ::trace_event_internal::kZeroNumArgs,
          NULL,
          NULL,
          NULL,
          NULL,
          TRACE_EVENT_FLAG_NONE);
      trace_event->UpdateDuration(now, thread_now);
    }
  }
  overhead_ += overhead;
}
void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}
bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(ProcessMemoryDump* pmd) {
  if (!chunk_)
    return true;
  std::string dump_base_name = StringPrintf(
      "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
  TraceEventMemoryOverhead overhead;
  chunk_->EstimateTraceMemoryOverhead(&overhead);
  overhead.DumpInto(dump_base_name.c_str(), pmd);
  return true;
}
void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}
TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}

TraceLogStatus::~TraceLogStatus() {}
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get();
}
TraceLog::TraceLog()
    : num_traces_recorded_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      trace_options_(kInternalRecordUntilFull),
      sampling_thread_handle_(0),
      trace_config_(TraceConfig()),
      event_callback_trace_config_(TraceConfig()),
      thread_shared_chunk_index_(0),
      use_worker_thread_(false) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE);
  }
#endif

  logged_events_.reset(CreateTraceBuffer());

  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this);
}
TraceLog::~TraceLog() {}
void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or when its message loop may be
  // blocked, the trace events will be added into the main buffer directly.
  if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
    return;
  auto thread_local_event_buffer = thread_local_event_buffer_.Get();
  if (thread_local_event_buffer &&
      !CheckGeneration(thread_local_event_buffer->generation())) {
    delete thread_local_event_buffer;
    thread_local_event_buffer = NULL;
  }
  if (!thread_local_event_buffer) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
    thread_local_event_buffer_.Set(thread_local_event_buffer);
  }
}
bool TraceLog::OnMemoryDump(ProcessMemoryDump* pmd) {
  TraceEventMemoryOverhead overhead;
  overhead.Add("TraceLog", sizeof(*this));
  {
    AutoLock lock(lock_);
    if (logged_events_)
      logged_events_->EstimateTraceMemoryOverhead(&overhead);
  }
  overhead.DumpInto("tracing/main_trace_log", pmd);
  return true;
}
const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}
const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
                                                    MAX_CATEGORY_GROUPS))
      << "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}
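
// Recomputes the enabled-state bitmask for one category group from the
// current mode, the recording TraceConfig, the event-callback TraceConfig and
// ETW export, then publishes it to g_category_group_enabled.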
void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      trace_config_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
           trace_config_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
#if defined(OS_WIN)
  if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
          category_group)) {
    enabled_flag |= ENABLED_FOR_ETW_EXPORT;
  }
#endif

  g_category_group_enabled[category_index] = enabled_flag;
}
void TraceLog::UpdateCategoryGroupEnabledFlags() {
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = 0; i < category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}
void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
  ResetTraceEventSyntheticDelays();
  const TraceConfig::StringList& delays =
      trace_config_.GetSyntheticDelayValues();
  TraceConfig::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(TimeDelta::FromMicroseconds(
            static_cast<int64>(target_duration * 1e6)));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}
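
// Returns the enabled-state slot for |category_group|, registering the group
// as a new category (under the lock) the first time it is seen.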
const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"'))
      << "Category groups may not contain double quote";
  // The g_category_groups is append only, avoid using a lock for the fast
  // path.
  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);

  // Search for pre-existing category group.
  for (size_t i = 0; i < current_category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  unsigned char* category_group_enabled = NULL;
  // This is the slow path: the lock is not held in the case above, so more
  // than one thread could have reached here trying to add the same category.
  // Only hold the lock when actually appending a new category, and
  // check the category groups again.
  AutoLock lock(lock_);
  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
  for (size_t i = 0; i < category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  // Create a new category group.
  DCHECK(category_index < MAX_CATEGORY_GROUPS)
      << "must increase MAX_CATEGORY_GROUPS";
  if (category_index < MAX_CATEGORY_GROUPS) {
    // Don't hold on to the category_group pointer, so that we can create
    // category groups with strings not known at compile time (this is
    // required by SetWatchEvent).
    const char* new_group = strdup(category_group);
    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
    g_category_groups[category_index] = new_group;
    DCHECK(!g_category_group_enabled[category_index]);
    // Note that if both included and excluded patterns in the
    // TraceConfig are empty, we exclude nothing,
    // thereby enabling this category group.
    UpdateCategoryGroupEnabledFlag(category_index);
    category_group_enabled = &g_category_group_enabled[category_index];
    // Update the max index now.
    base::subtle::Release_Store(&g_category_index, category_index + 1);
  } else {
    category_group_enabled =
        &g_category_group_enabled[g_category_categories_exhausted];
  }
  return category_group_enabled;
}
void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  category_groups->push_back(
      g_category_groups[g_category_trace_event_overhead]);
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = g_num_builtin_categories; i < category_index; i++)
    category_groups->push_back(g_category_groups[i]);
}
void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_task_runner_);

    InternalTraceOptions new_options =
        GetInternalOptionsFromTraceConfig(trace_config);

    InternalTraceOptions old_options = trace_options();

    if (IsEnabled()) {
      if (new_options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";
      }
      if (mode != mode_) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
      }

      trace_config_.Merge(trace_config);
      UpdateCategoryGroupEnabledFlags();
      return;
    }

    if (dispatching_to_observer_list_) {
      DLOG(ERROR)
          << "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    mode_ = mode;

    if (new_options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, new_options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    trace_config_ = TraceConfig(trace_config);
    UpdateCategoryGroupEnabledFlags();
    UpdateSyntheticDelaysFromTraceConfig();

    if (new_options & kInternalEnableSampling) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[0], "bucket0",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[1], "bucket1",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2], "bucket2",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(0, sampling_thread_.get(),
                                  &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}
void TraceLog::SetArgumentFilterPredicate(
    const ArgumentFilterPredicate& argument_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!argument_filter_predicate.is_null());
  DCHECK(argument_filter_predicate_.is_null());
  argument_filter_predicate_ = argument_filter_predicate;
}
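
// Maps the public TraceConfig (record mode, sampling, argument filtering)
// onto the internal option bit flags.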
TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
    const TraceConfig& config) {
  InternalTraceOptions ret =
      config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
  if (config.IsArgumentFilterEnabled())
    ret |= kInternalEnableArgumentFilter;
  switch (config.GetTraceRecordMode()) {
    case RECORD_UNTIL_FULL:
      return ret | kInternalRecordUntilFull;
    case RECORD_CONTINUOUSLY:
      return ret | kInternalRecordContinuously;
    case ECHO_TO_CONSOLE:
      return ret | kInternalEchoToConsole;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      return ret | kInternalRecordAsMuchAsPossible;
  }
  NOTREACHED();
  return kInternalNone;
}
TraceConfig TraceLog::GetCurrentTraceConfig() const {
  AutoLock lock(lock_);
  return trace_config_;
}

void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();
}
void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (!IsEnabled())
    return;

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  mode_ = DISABLED;

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
  }

  trace_config_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;
  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (size_t i = 0; i < observer_list.size(); ++i)
      observer_list[i]->OnTraceLogDisabled();
  }
  dispatching_to_observer_list_ = false;
}
int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  return num_traces_recorded_;
}
void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(), listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(), listener);
  return it != enabled_state_observer_list_.end();
}
TraceLogStatus TraceLog::GetStatus() const {
  AutoLock lock(lock_);
  TraceLogStatus result;
  result.event_capacity = logged_events_->Capacity();
  result.event_count = logged_events_->Size();
  return result;
}

bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}
TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle,
    bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ =
        logged_events_->GetChunk(&thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}
void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null())
      buffer_limit_reached_timestamp_ = OffsetNow();
    SetDisabledWhileLocked();
  }
}
void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
                                       EventCallback cb) {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_trace_config_ = trace_config;
  UpdateCategoryGroupEnabledFlags();
}

void TraceLog::SetEventCallbackDisabled() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_, 0);
  UpdateCategoryGroupEnabledFlags();
}
// Flush() works as the following:
// 1. Flush() is called in thread A whose task runner is saved in
//    flush_task_runner_;
// 2. If thread_message_loops_ is not empty, thread A posts a task to each
//    message loop to flush the thread local buffers; otherwise finish the
//    flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main
//      buffer;
//    - The message loop will be removed from thread_message_loops_;
//    If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
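//
// A typical consumer looks roughly like this (illustrative sketch only):
//
//   void OnTraceData(const scoped_refptr<base::RefCountedString>& json,
//                    bool has_more_events) {
//     // Append json->data() to the output; the callback keeps firing until
//     // has_more_events is false.
//   }
//   ...
//   TraceLog::GetInstance()->Flush(base::Bind(&OnTraceData),
//                                  false /* use_worker_thread */);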
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  FlushInternal(cb, use_worker_thread, false);
}
void TraceLog::CancelTracing(const OutputCallback& cb) {
  SetDisabled();
  FlushInternal(cb, false, true);
}
void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
                             bool use_worker_thread,
                             bool discard_events) {
  use_worker_thread_ = use_worker_thread;
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int generation = this->generation();
  // Copy of thread_message_loops_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner>>
      thread_message_loop_task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_task_runner_);
    flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
                             ? ThreadTaskRunnerHandle::Get()
                             : nullptr;
    DCHECK_IMPLIES(thread_message_loops_.size(), flush_task_runner_);
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        thread_message_loop_task_runners.push_back((*it)->task_runner());
      }
    }
  }

  if (thread_message_loop_task_runners.size()) {
    for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
      thread_message_loop_task_runners[i]->PostTask(
          FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
                          generation, discard_events));
    }
    flush_task_runner_->PostDelayedTask(
        FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
                        discard_events),
        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(generation, discard_events);
}
// Usually it runs on a different thread.
void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const OutputCallback& flush_output_callback,
    const ArgumentFilterPredicate& argument_filter_predicate) {
  if (flush_output_callback.is_null())
    return;

  // The callback needs to be called at least once even if there are no events
  // to let the caller know that the flush is complete.
  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
    for (size_t j = 0; j < chunk->size(); ++j) {
      size_t size = json_events_str_ptr->size();
      if (size > kTraceEventBufferSizeInBytes) {
        flush_output_callback.Run(json_events_str_ptr, true);
        json_events_str_ptr = new RefCountedString();
      } else if (size) {
        json_events_str_ptr->data().append(",\n");
      }
      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
                                         argument_filter_predicate);
    }
  }
  flush_output_callback.Run(json_events_str_ptr, false);
}
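
// Completes a flush once every producer has reported (or the timeout fired):
// swaps out the filled buffer and converts it to the trace format, on a
// worker thread when that was requested.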
void TraceLog::FinishFlush(int generation, bool discard_events) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;
  ArgumentFilterPredicate argument_filter_predicate;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_task_runner_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

    if (trace_options() & kInternalEnableArgumentFilter) {
      CHECK(!argument_filter_predicate_.is_null());
      argument_filter_predicate = argument_filter_predicate_;
    }
  }

  if (discard_events) {
    if (!flush_output_callback.is_null()) {
      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      flush_output_callback.Run(empty_result, false);
    }
    return;
  }

  if (use_worker_thread_ &&
      WorkerPool::PostTask(
          FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
                          Passed(&previous_logged_events),
                          flush_output_callback, argument_filter_predicate),
          true)) {
    return;
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback,
                                  argument_filter_predicate);
}
// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_task_runner_ ||
      thread_message_loops_.size())
    return;

  flush_task_runner_->PostTask(
      FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
                      discard_events));
}
void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING)
        << "The following threads haven't finished flush in time. "
           "If this happens stably for some thread, please call "
           "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
           "the thread to prevent its trace events from being lost.";
    for (hash_set<MessageLoop*>::const_iterator it =
             thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  FinishFlush(generation, discard_events);
}
void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  ArgumentFilterPredicate argument_filter_predicate;
  {
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();

    if (trace_options() & kInternalEnableArgumentFilter) {
      CHECK(!argument_filter_predicate_.is_null());
      argument_filter_predicate = argument_filter_predicate_;
    }
  }  // release lock

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback,
                                  argument_filter_predicate);
}
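
// Swaps in a fresh trace buffer and bumps the generation so that stale
// thread-local buffers can be detected and discarded.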
void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TraceTicks now = base::TraceTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      id,
      trace_event_internal::kNoId,  // context_id
      trace_event_internal::kNoId,  // bind_id
      thread_id,
      now,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags);
}
TraceEventHandle TraceLog::AddTraceEventWithContextId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    unsigned long long context_id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TraceTicks now = base::TraceTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      id,
      context_id,
      trace_event_internal::kNoId,  // bind_id
      thread_id,
      now,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
}
// Handles legacy calls to AddTraceEventWithThreadIdAndTimestamp that pass
// kNoId as the bind_id.
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    unsigned long long context_id,
    int thread_id,
    const TraceTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  return AddTraceEventWithThreadIdAndTimestamp(
      phase,
      category_group_enabled,
      name,
      id,
      context_id,
      trace_event_internal::kNoId,  // bind_id
      thread_id,
      timestamp,
      num_args,
      arg_names,
      arg_types,
      arg_values,
      convertable_values,
      flags);
}
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    unsigned long long context_id,
    unsigned long long bind_id,
    int thread_id,
    const TraceTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  TraceEventHandle handle = {0, 0, 0};
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(!timestamp.is_null());

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
    if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
        (flags & TRACE_EVENT_FLAG_FLOW_OUT))
      bind_id = MangleEventId(bind_id);
    id = MangleEventId(id);
  }

  TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
  TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP
                       ? OffsetNow()
                       : offset_event_timestamp;
  ThreadTicks thread_now = ThreadNow();

  // |thread_local_event_buffer_| can be null if the current thread doesn't
  // have a message loop or the message loop is blocked.
  InitializeThreadLocalEventBufferIfSupported();
  auto thread_local_event_buffer = thread_local_event_buffer_.Get();

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name =
        ThreadIdNameManager::GetInstance()->GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() && new_name &&
        *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names = base::SplitStringPiece(
            existing_name->second, ",", base::KEEP_WHITESPACE,
            base::SPLIT_WANT_NONEMPTY);
        bool found = std::find(existing_names.begin(), existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

#if defined(OS_WIN)
  // This is done sooner rather than later, to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already threadsafe.
  if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  num_args, arg_names, arg_types, arg_values,
                                  convertable_values);
#endif  // OS_WIN

  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id,
                              offset_event_timestamp,
                              thread_now,
                              phase,
                              category_group_enabled,
                              name,
                              id,
                              context_id,
                              bind_id,
                              num_args,
                              arg_names,
                              arg_types,
                              arg_values,
                              convertable_values,
                              flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (reinterpret_cast<const unsigned char*>(
          subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();
    }
  }

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(
          offset_event_timestamp,
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          category_group_enabled, name, id, num_args, arg_names, arg_types,
          arg_values, flags);
    }
  }

  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);

  return handle;
}
// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TraceTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id =
      trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}
void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  ThreadTicks thread_now = ThreadNow();
  TraceTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message =
          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoId, 0,
                     nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
    }
  }
}
void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category =
      GetCategoryGroupEnabled(category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}
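
// Event ids are XORed with a per-process hash (see SetProcessID()) so that
// ids from different processes don't collide when traces are merged.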
uint64 TraceLog::MangleEventId(uint64 id) {
  return id ^ process_id_hash_;
}
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
  InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                          0, "num_cpus", "number",
                          base::SysInfo::NumberOfProcessors());
#endif

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id, "process_sort_index",
                            "sort_index", process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id, "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); it++) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id, "process_labels", "labels",
                            base::JoinString(labels, ","));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); it++) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first, "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); it++) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first, "thread_name", "name", it->second);
  }

  // If buffer is full, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id, "trace_buffer_overflowed",
                            "overflowed_at_ts",
                            buffer_limit_reached_timestamp_);
  }
}
void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}
TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer in the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq()
               ? thread_shared_chunk_->GetEventAt(handle.event_index)
               : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(int label_id,
                                  const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it =
      process_labels_.find(label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}
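
// Chooses the trace buffer implementation and size from the current trace
// options and mode.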
TraceBuffer* TraceLog::CreateTraceBuffer() {
  InternalTraceOptions options = trace_options();
  if (options & kInternalRecordContinuously)
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kTraceEventRingBufferChunks);
  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kMonitorTraceEventBufferChunks);
  else if (options & kInternalEchoToConsole)
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kEchoToConsoleTraceEventBufferChunks);
  else if (options & kInternalRecordAsMuchAsPossible)
    return TraceBuffer::CreateTraceBufferVectorOfSize(
        kTraceEventVectorBigBufferChunks);
  return TraceBuffer::CreateTraceBufferVectorOfSize(
      kTraceEventVectorBufferChunks);
}
void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
}
}  // namespace trace_event
namespace trace_event_internal {
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group,
    const char* name) {
  // The single atom works because for now the category_group can only be
  // "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE,
            category_group_enabled_,
            name,
            trace_event_internal::kNoId,  // id
            trace_event_internal::kNoId,  // context_id
            static_cast<int>(base::PlatformThread::CurrentId()),  // thread_id
            base::TraceTicks::Now(),
            trace_event_internal::kZeroNumArgs,
            nullptr,
            nullptr,
            nullptr,
            nullptr,
            TRACE_EVENT_FLAG_NONE);
  }
}
ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                event_handle_);
  }
}

}  // namespace trace_event_internal