// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_log.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/memory/singleton.h"
#include "base/process/process_metrics.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "base/trace_event/trace_sampling_thread.h"
#if defined(OS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#include "base/trace_event/trace_event_win.h"
#endif
// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

namespace base {
namespace internal {

class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<trace_event::TraceLog,
              LeakySingletonTraits<trace_event::TraceLog>>::OnExit(0);
  }
};

}  // namespace internal

namespace trace_event {
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many big buffer chunks");
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many vector buffer chunks");
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;

// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;

// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
#define MAX_CATEGORY_GROUPS 100

// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
    "toplevel",
    "tracing already shutdown",
    "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
    "__metadata"};

// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_num_builtin_categories = 4;
// Skip default categories.
base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
    LAZY_INSTANCE_INITIALIZER;

ThreadTicks ThreadNow() {
  return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
}
template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name,
                             const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(
      thread_id,
      TraceTicks(),
      ThreadTicks(),
      TRACE_EVENT_PHASE_METADATA,
      &g_category_group_enabled[g_category_metadata],
      metadata_name,
      trace_event_internal::kNoId,  // id
      trace_event_internal::kNoId,  // context_id
      trace_event_internal::kNoId,  // bind_id
      num_args,
      &arg_name,
      &arg_type,
      &arg_value,
      nullptr,
      TRACE_EVENT_FLAG_NONE);
}
class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};
// Use this function instead of the TraceEventHandle constructor to keep the
// overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
void MakeHandle(uint32 chunk_seq,
                size_t chunk_index,
                size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
  DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
// A helper class that allows the lock to be acquired in the middle of the
// scope and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}

  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    }
  }

 private:
  Lock* lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};
class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver,
      public MemoryDumpProvider {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(const MemoryDumpArgs& args,
                    ProcessMemoryDump* pmd) override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};
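
// Each thread that owns a message loop gets one ThreadLocalEventBuffer. It
// holds a single chunk borrowed from TraceLog's main buffer and returns it,
// under TraceLog::lock_, when the chunk fills up, the thread exits, or a
// flush is requested.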
TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  // This is to report the local memory usage when memory-infra is enabled.
  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, ThreadTaskRunnerHandle::Get());

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}
TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);
  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

  AutoLock lock(trace_log_->lock_);
  trace_log_->thread_message_loops_.erase(MessageLoop::current());
  trace_log_->thread_local_event_buffer_.Set(NULL);
}
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}
void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}
bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
                                                    ProcessMemoryDump* pmd) {
  if (!chunk_)
    return true;
  std::string dump_base_name = StringPrintf(
      "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
  TraceEventMemoryOverhead overhead;
  chunk_->EstimateTraceMemoryOverhead(&overhead);
  overhead.DumpInto(dump_base_name.c_str(), pmd);
  return true;
}
void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}
TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}

TraceLogStatus::~TraceLogStatus() {}

TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get();
}
TraceLog::TraceLog()
    : num_traces_recorded_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      trace_options_(kInternalRecordUntilFull),
      sampling_thread_handle_(0),
      trace_config_(TraceConfig()),
      event_callback_trace_config_(TraceConfig()),
      thread_shared_chunk_index_(0),
      use_worker_thread_(false) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE);
  }
#endif

  logged_events_.reset(CreateTraceBuffer());

  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this);
}
TraceLog::~TraceLog() {}
void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or when the message loop may be
  // blocked, the trace events will be added into the main buffer directly.
  if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
    return;
  auto thread_local_event_buffer = thread_local_event_buffer_.Get();
  if (thread_local_event_buffer &&
      !CheckGeneration(thread_local_event_buffer->generation())) {
    delete thread_local_event_buffer;
    thread_local_event_buffer = NULL;
  }
  if (!thread_local_event_buffer) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
    thread_local_event_buffer_.Set(thread_local_event_buffer);
  }
}
bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
                            ProcessMemoryDump* pmd) {
  // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
  // (crbug.com/499731).
  TraceEventMemoryOverhead overhead;
  overhead.Add("TraceLog", sizeof(*this));
  {
    AutoLock lock(lock_);
    logged_events_->EstimateTraceMemoryOverhead(&overhead);
  }
  overhead.DumpInto("tracing/main_trace_log", pmd);
  return true;
}
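
// Returns the enabled-state byte for |category_group|, registering the group
// on first use. If the TraceLog singleton is no longer available, the shared
// "tracing already shutdown" slot is returned instead.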
const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}
const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in the g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
                                                    MAX_CATEGORY_GROUPS))
      << "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}
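
// Recomputes the enabled-state bits of one category group from the current
// mode, the recording trace config, the event-callback trace config and, on
// Windows, the ETW export state.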
void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      trace_config_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
           trace_config_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
#if defined(OS_WIN)
  if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
          category_group)) {
    enabled_flag |= ENABLED_FOR_ETW_EXPORT;
  }
#endif

  g_category_group_enabled[category_index] = enabled_flag;
}
void TraceLog::UpdateCategoryGroupEnabledFlags() {
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = 0; i < category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}
void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
  ResetTraceEventSyntheticDelays();
  const TraceConfig::StringList& delays =
      trace_config_.GetSyntheticDelayValues();
  TraceConfig::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(TimeDelta::FromMicroseconds(
            static_cast<int64>(target_duration * 1e6)));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}
const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"'))
      << "Category groups may not contain double quote";
  // g_category_groups is append only, so avoid using a lock for the fast path.
  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);

  // Search for pre-existing category group.
  for (size_t i = 0; i < current_category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  unsigned char* category_group_enabled = NULL;
  // This is the slow path: the lock is not held in the case above, so more
  // than one thread could have reached here trying to add the same category.
  // Only hold the lock when actually appending a new category, and
  // check the category groups again.
  AutoLock lock(lock_);
  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
  for (size_t i = 0; i < category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  // Create a new category group.
  DCHECK(category_index < MAX_CATEGORY_GROUPS)
      << "must increase MAX_CATEGORY_GROUPS";
  if (category_index < MAX_CATEGORY_GROUPS) {
    // Don't hold on to the category_group pointer, so that we can create
    // category groups with strings not known at compile time (this is
    // required by SetWatchEvent).
    const char* new_group = strdup(category_group);
    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
    g_category_groups[category_index] = new_group;
    DCHECK(!g_category_group_enabled[category_index]);
    // Note that if both included and excluded patterns in the
    // TraceConfig are empty, we exclude nothing,
    // thereby enabling this category group.
    UpdateCategoryGroupEnabledFlag(category_index);
    category_group_enabled = &g_category_group_enabled[category_index];
    // Update the max index now.
    base::subtle::Release_Store(&g_category_index, category_index + 1);
  } else {
    category_group_enabled =
        &g_category_group_enabled[g_category_categories_exhausted];
  }
  return category_group_enabled;
}
void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = g_num_builtin_categories; i < category_index; i++)
    category_groups->push_back(g_category_groups[i]);
}
void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_task_runner_);

    InternalTraceOptions new_options =
        GetInternalOptionsFromTraceConfig(trace_config);

    InternalTraceOptions old_options = trace_options();

    if (IsEnabled()) {
      if (new_options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";
      }
      if (mode != mode_) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
      }
      trace_config_.Merge(trace_config);
      UpdateCategoryGroupEnabledFlags();
      return;
    }

    if (dispatching_to_observer_list_) {
      DLOG(ERROR)
          << "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    mode_ = mode;

    if (new_options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, new_options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    trace_config_ = TraceConfig(trace_config);
    UpdateCategoryGroupEnabledFlags();
    UpdateSyntheticDelaysFromTraceConfig();

    if (new_options & kInternalEnableSampling) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[0], "bucket0",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[1], "bucket1",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2], "bucket2",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(0, sampling_thread_.get(),
                                  &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}
void TraceLog::SetArgumentFilterPredicate(
    const ArgumentFilterPredicate& argument_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!argument_filter_predicate.is_null());
  DCHECK(argument_filter_predicate_.is_null());
  argument_filter_predicate_ = argument_filter_predicate;
}
TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
    const TraceConfig& config) {
  InternalTraceOptions ret =
      config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
  if (config.IsArgumentFilterEnabled())
    ret |= kInternalEnableArgumentFilter;
  switch (config.GetTraceRecordMode()) {
    case RECORD_UNTIL_FULL:
      return ret | kInternalRecordUntilFull;
    case RECORD_CONTINUOUSLY:
      return ret | kInternalRecordContinuously;
    case ECHO_TO_CONSOLE:
      return ret | kInternalEchoToConsole;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      return ret | kInternalRecordAsMuchAsPossible;
  }
  NOTREACHED();
  return kInternalNone;
}
TraceConfig TraceLog::GetCurrentTraceConfig() const {
  AutoLock lock(lock_);
  return trace_config_;
}

void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();
}
void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  mode_ = DISABLED;

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
  }

  trace_config_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;
  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (size_t i = 0; i < observer_list.size(); ++i)
      observer_list[i]->OnTraceLogDisabled();
  }
  dispatching_to_observer_list_ = false;
}
int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  return num_traces_recorded_;
}
void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(lock_);
  enabled_state_observer_list_.push_back(listener);
}
void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(lock_);
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(), listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}
bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  AutoLock lock(lock_);
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(), listener);
  return it != enabled_state_observer_list_.end();
}
TraceLogStatus TraceLog::GetStatus() const {
  AutoLock lock(lock_);
  TraceLogStatus result;
  result.event_capacity = logged_events_->Capacity();
  result.event_count = logged_events_->Size();
  return result;
}
bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}
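
// Allocates an event slot in the chunk shared by threads that have no
// thread-local buffer. Must be called with |lock_| held; a full shared chunk
// is returned to |logged_events_| before a new one is taken.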
TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle,
    bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ =
        logged_events_->GetChunk(&thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}
void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked();
  }
}
void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
                                       EventCallback cb) {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_trace_config_ = trace_config;
  UpdateCategoryGroupEnabledFlags();
}
void TraceLog::SetEventCallbackDisabled() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_, 0);
  UpdateCategoryGroupEnabledFlags();
}
// Flush() works as follows:
// 1. Flush() is called in thread A whose task runner is saved in
//    flush_task_runner_;
// 2. If thread_message_loops_ is not empty, thread A posts a task to each
//    message loop to flush the thread-local buffers; otherwise finish the
//    flush;
// 3. FlushCurrentThread() deletes the thread-local event buffer:
//    - The last batch of events of the thread is flushed into the main buffer;
//    - The message loop will be removed from thread_message_loops_;
//      if this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  FlushInternal(cb, use_worker_thread, false);
}
void TraceLog::CancelTracing(const OutputCallback& cb) {
  SetDisabled();
  FlushInternal(cb, false, true);
}
void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
                             bool use_worker_thread,
                             bool discard_events) {
  use_worker_thread_ = use_worker_thread;
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int generation = this->generation();
  // Copy of thread_message_loops_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner>>
      thread_message_loop_task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_task_runner_);
    flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
                             ? ThreadTaskRunnerHandle::Get()
                             : nullptr;
    DCHECK_IMPLIES(thread_message_loops_.size(), flush_task_runner_);
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        thread_message_loop_task_runners.push_back((*it)->task_runner());
      }
    }
  }

  if (thread_message_loop_task_runners.size()) {
    for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
      thread_message_loop_task_runners[i]->PostTask(
          FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
                          generation, discard_events));
    }
    flush_task_runner_->PostDelayedTask(
        FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
                        discard_events),
        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(generation, discard_events);
}
// Usually it runs on a different thread.
void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const OutputCallback& flush_output_callback,
    const ArgumentFilterPredicate& argument_filter_predicate) {
  if (flush_output_callback.is_null())
    return;

  // The callback needs to be called at least once even if there are no events
  // to let the caller know that the flush has completed.
  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
    for (size_t j = 0; j < chunk->size(); ++j) {
      size_t size = json_events_str_ptr->size();
      if (size > kTraceEventBufferSizeInBytes) {
        flush_output_callback.Run(json_events_str_ptr, true);
        json_events_str_ptr = new RefCountedString();
      } else if (size) {
        json_events_str_ptr->data().append(",\n");
      }
      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
                                         argument_filter_predicate);
    }
  }
  flush_output_callback.Run(json_events_str_ptr, false);
}
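
// Completes a flush for |generation|: swaps out the filled buffer under the
// lock, then serializes it on a worker thread when requested, or synchronously
// otherwise. When |discard_events| is set the buffer is simply dropped.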
void TraceLog::FinishFlush(int generation, bool discard_events) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;
  ArgumentFilterPredicate argument_filter_predicate;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_task_runner_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

    if (trace_options() & kInternalEnableArgumentFilter) {
      CHECK(!argument_filter_predicate_.is_null());
      argument_filter_predicate = argument_filter_predicate_;
    }
  }

  if (discard_events) {
    if (!flush_output_callback.is_null()) {
      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      flush_output_callback.Run(empty_result, false);
    }
    return;
  }

  if (use_worker_thread_ &&
      WorkerPool::PostTask(
          FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
                          Passed(&previous_logged_events),
                          flush_output_callback, argument_filter_predicate),
          true)) {
    return;
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback,
                                  argument_filter_predicate);
}
// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread-local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_task_runner_ ||
      thread_message_loops_.size())
    return;

  flush_task_runner_->PostTask(
      FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
                      discard_events));
}
void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING)
        << "The following threads haven't finished flush in time. "
           "If this happens stably for some thread, please call "
           "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
           "the thread to prevent its trace events from being lost.";
    for (hash_set<MessageLoop*>::const_iterator it =
             thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  FinishFlush(generation, discard_events);
}
void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  ArgumentFilterPredicate argument_filter_predicate;
  {
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();

    if (trace_options() & kInternalEnableArgumentFilter) {
      CHECK(!argument_filter_predicate_.is_null());
      argument_filter_predicate = argument_filter_predicate_;
    }
  }  // release lock

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback,
                                  argument_filter_predicate);
}
void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
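
// The convenience overloads below stamp the current thread id and time and
// forward to the full AddTraceEventWithThreadIdAndTimestamp() entry point.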
TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TraceTicks now = base::TraceTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, id,
      trace_event_internal::kNoId,  // context_id
      trace_event_internal::kNoId,  // bind_id
      thread_id, now, num_args, arg_names, arg_types, arg_values,
      convertable_values, flags);
}

TraceEventHandle TraceLog::AddTraceEventWithContextId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    unsigned long long context_id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TraceTicks now = base::TraceTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, id, context_id,
      trace_event_internal::kNoId,  // bind_id
      thread_id, now, num_args, arg_names, arg_types, arg_values,
      convertable_values, flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
}
// Handles legacy calls to AddTraceEventWithThreadIdAndTimestamp
// with kNoId as bind_id.
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    unsigned long long context_id,
    int thread_id,
    const TraceTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, id, context_id,
      trace_event_internal::kNoId,  // bind_id
      thread_id, timestamp, num_args, arg_names, arg_types, arg_values,
      convertable_values, flags);
}
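
// The main entry point for recording an event: it updates the cached thread
// name, writes the event into the thread-local or shared buffer, and notifies
// the ETW exporter, console echo and event callback as configured.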
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    unsigned long long context_id,
    unsigned long long bind_id,
    int thread_id,
    const TraceTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  TraceEventHandle handle = {0, 0, 0};
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(!timestamp.is_null());

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
    if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
        (flags & TRACE_EVENT_FLAG_FLOW_OUT))
      bind_id = MangleEventId(bind_id);
    id = MangleEventId(id);
  }

  TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
  ThreadTicks thread_now = ThreadNow();
  // |thread_local_event_buffer_| can be null if the current thread doesn't
  // have a message loop or the message loop is blocked.
  InitializeThreadLocalEventBufferIfSupported();
  auto thread_local_event_buffer = thread_local_event_buffer_.Get();
  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name =
        ThreadIdNameManager::GetInstance()->GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() && new_name &&
        *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names = base::SplitStringPiece(
            existing_name->second, ",", base::KEEP_WHITESPACE,
            base::SPLIT_WANT_NONEMPTY);
        bool found = std::find(existing_names.begin(), existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }
#if defined(OS_WIN)
  // This is done sooner rather than later, to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already
  // threadsafe.
  if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  num_args, arg_names, arg_types, arg_values,
                                  convertable_values);
#endif  // OS_WIN
  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id,
                              offset_event_timestamp,
                              thread_now,
                              phase,
                              category_group_enabled,
                              name,
                              id,
                              context_id,
                              bind_id,
                              num_args,
                              arg_names,
                              arg_types,
                              arg_values,
                              convertable_values,
                              flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;
  if (reinterpret_cast<const unsigned char*>(
          subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();
    }
  }
  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(
          offset_event_timestamp,
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          category_group_enabled, name, id, num_args, arg_names, arg_types,
          arg_values, flags);
    }
  }

  return handle;
}
// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TraceTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id =
      trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}
void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  ThreadTicks thread_now = ThreadNow();
  TraceTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message =
          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoId, 0,
                     nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
    }
  }
}
void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category =
      GetCategoryGroupEnabled(category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}
void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}
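
// XORs the id with a per-process hash (set up in SetProcessID) so that ids
// emitted by different processes do not collide when traces are merged.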
uint64 TraceLog::MangleEventId(uint64 id) {
  return id ^ process_id_hash_;
}
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
  InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                          0, "num_cpus", "number",
                          base::SysInfo::NumberOfProcessors());
#endif

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id, "process_sort_index",
                            "sort_index", process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id, "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); it++) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id, "process_labels", "labels",
                            base::JoinString(labels, ","));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); it++) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first, "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); it++) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first, "thread_name", "name", it->second);
  }

  // If buffer is full, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id, "trace_buffer_overflowed",
                            "overflowed_at_ts",
                            buffer_limit_reached_timestamp_);
  }
}
void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  internal::DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}
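
// Resolves a TraceEventHandle back to its TraceEvent, checking the calling
// thread's local buffer first, then the shared chunk and the main buffer
// (acquiring |lock| when the shared state has to be touched).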
TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer under the control of the thread-local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq()
               ? thread_shared_chunk_->GetEventAt(handle.event_index)
               : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(int label_id,
                                  const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}
void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it =
      process_labels_.find(label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}
void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread-local buffer.
    delete thread_local_event_buffer_.Get();
  }
}
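
// Picks the buffer implementation matching the current trace options: ring
// buffers for continuous recording, monitoring and echo-to-console modes,
// otherwise a vector buffer (sized larger for RECORD_AS_MUCH_AS_POSSIBLE).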
TraceBuffer* TraceLog::CreateTraceBuffer() {
  InternalTraceOptions options = trace_options();
  if (options & kInternalRecordContinuously)
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kTraceEventRingBufferChunks);
  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kMonitorTraceEventBufferChunks);
  else if (options & kInternalEchoToConsole)
    return TraceBuffer::CreateTraceBufferRingBuffer(
        kEchoToConsoleTraceEventBufferChunks);
  else if (options & kInternalRecordAsMuchAsPossible)
    return TraceBuffer::CreateTraceBufferVectorOfSize(
        kTraceEventVectorBigBufferChunks);
  return TraceBuffer::CreateTraceBufferVectorOfSize(
      kTraceEventVectorBufferChunks);
}
#if defined(OS_WIN)
void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
  AutoLock lock(lock_);
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  // Go through each category and set/clear the ETW bit depending on whether
  // the category is enabled.
  for (size_t i = 0; i < category_index; i++) {
    const char* category_group = g_category_groups[i];
    DCHECK(category_group);
    if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
            category_group)) {
      g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT;
    } else {
      g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT;
    }
  }
}
#endif  // defined(OS_WIN)
void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
}

}  // namespace trace_event
}  // namespace base
namespace trace_event_internal {
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group,
    const char* name) {
  // The single atom works because for now the category_group can only be
  // "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE,
            category_group_enabled_,
            name,
            trace_event_internal::kNoId,  // id
            trace_event_internal::kNoId,  // context_id
            static_cast<int>(base::PlatformThread::CurrentId()),  // thread_id
            base::TraceTicks::Now(),
            trace_event_internal::kZeroNumArgs,
            nullptr,
            nullptr,
            nullptr,
            nullptr,
            TRACE_EVENT_FLAG_NONE);
  }
}
ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                event_handle_);
  }
}

}  // namespace trace_event_internal