// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/trace_event_impl.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/trace_event.h"
#include "base/format_macros.h"
#include "base/lazy_instance.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/time/time.h"

#if defined(OS_WIN)
#include "base/debug/trace_event_win.h"
#endif

class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::debug::TraceLog,
              LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
  }
};

// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

namespace base {
namespace debug {

namespace {

// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceEventVectorBufferSize = 250000;
const size_t kTraceEventRingBufferSize = kTraceEventVectorBufferSize / 4;
const size_t kTraceEventThreadLocalBufferSize = 64;
const size_t kTraceEventBatchSize = 1000;
const size_t kTraceEventInitialBufferSize = 1024;

const int kThreadFlushTimeoutMs = 1000;

#define MAX_CATEGORY_GROUPS 100

// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
  "tracing already shutdown",
  "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
  "__metadata",
  // For reporting trace_event overhead. For thread local event buffers only.
  "trace_event_overhead"};

// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
const int g_category_already_shutdown = 0;
const int g_category_categories_exhausted = 1;
const int g_category_metadata = 2;
const int g_category_trace_event_overhead = 3;
const int g_num_builtin_categories = 4;
int g_category_index = g_num_builtin_categories;  // Skip default categories.

// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;

const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kEnableSampling[] = "enable-sampling";

TimeTicks ThreadNow() {
  return TimeTicks::IsThreadNowSupported() ?
      TimeTicks::ThreadNow() : TimeTicks();
}

class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer()
      : unused_event_index_(0),
        oldest_event_index_(0) {
    logged_events_.reserve(kTraceEventInitialBufferSize);
  }

  virtual ~TraceBufferRingBuffer() {}

  virtual void AddEvent(const TraceEvent& event) OVERRIDE {
    if (unused_event_index_ < Size())
      logged_events_[unused_event_index_] = event;
    else
      logged_events_.push_back(event);

    unused_event_index_ = NextIndex(unused_event_index_);
    if (unused_event_index_ == oldest_event_index_) {
      oldest_event_index_ = NextIndex(oldest_event_index_);
    }
  }

  virtual bool HasMoreEvents() const OVERRIDE {
    return oldest_event_index_ != unused_event_index_;
  }

  virtual const TraceEvent& NextEvent() OVERRIDE {
    DCHECK(HasMoreEvents());

    size_t next = oldest_event_index_;
    oldest_event_index_ = NextIndex(oldest_event_index_);
    return GetEventAt(next);
  }

  virtual bool IsFull() const OVERRIDE {
    return false;
  }

  virtual size_t CountEnabledByName(
      const unsigned char* category,
      const std::string& event_name) const OVERRIDE {
    size_t notify_count = 0;
    size_t index = oldest_event_index_;
    while (index != unused_event_index_) {
      const TraceEvent& event = GetEventAt(index);
      if (category == event.category_group_enabled() &&
          strcmp(event_name.c_str(), event.name()) == 0) {
        ++notify_count;
      }
      index = NextIndex(index);
    }
    return notify_count;
  }

  virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
    DCHECK(index < logged_events_.size());
    return logged_events_[index];
  }

  virtual size_t Size() const OVERRIDE {
    return logged_events_.size();
  }

  virtual size_t Capacity() const OVERRIDE {
    return kTraceEventRingBufferSize;
  }

 private:
  static size_t NextIndex(size_t index) {
    index++;
    if (index >= kTraceEventRingBufferSize)
      index = 0;
    return index;
  }

  size_t unused_event_index_;
  size_t oldest_event_index_;
  std::vector<TraceEvent> logged_events_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
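
// Example (illustrative, with hypothetical index values): the two indices
// chase each other around the ring via NextIndex, so once the writer catches
// up with the reader the oldest events are overwritten rather than new ones
// being rejected -- which is why IsFull() is always false for this buffer:
//
//   NextIndex(0) == 1
//   NextIndex(kTraceEventRingBufferSize - 1) == 0  // wraps to the start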

class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector() : current_iteration_index_(0) {
    logged_events_.reserve(kTraceEventInitialBufferSize);
  }

  virtual ~TraceBufferVector() {
  }

  virtual void AddEvent(const TraceEvent& event) OVERRIDE {
    // Note, we have two callers which need to be handled:
    // - AddEventToMainBufferWhileLocked() which has two cases:
    //   - called directly from AddTraceEventWithThreadIdAndTimestamp()
    //     which checks if the buffer is full and exits early if it is;
    //   - called from ThreadLocalEventBuffer::FlushWhileLocked();
    // - AddThreadNameMetadataEvents().
    // We cannot DCHECK(!IsFull()) because we have to add the metadata
    // events and flush thread-local buffers even if the buffer is full.
    logged_events_.push_back(event);
  }

  virtual bool HasMoreEvents() const OVERRIDE {
    return current_iteration_index_ < Size();
  }

  virtual const TraceEvent& NextEvent() OVERRIDE {
    DCHECK(HasMoreEvents());
    return GetEventAt(current_iteration_index_++);
  }

  virtual bool IsFull() const OVERRIDE {
    return Size() >= kTraceEventVectorBufferSize;
  }

  virtual size_t CountEnabledByName(
      const unsigned char* category,
      const std::string& event_name) const OVERRIDE {
    size_t notify_count = 0;
    for (size_t i = 0; i < Size(); i++) {
      const TraceEvent& event = GetEventAt(i);
      if (category == event.category_group_enabled() &&
          strcmp(event_name.c_str(), event.name()) == 0) {
        ++notify_count;
      }
    }
    return notify_count;
  }

  virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
    DCHECK(index < logged_events_.size());
    return logged_events_[index];
  }

  virtual size_t Size() const OVERRIDE {
    return logged_events_.size();
  }

  virtual size_t Capacity() const OVERRIDE {
    return kTraceEventVectorBufferSize;
  }

 private:
  size_t current_iteration_index_;
  std::vector<TraceEvent> logged_events_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

class TraceBufferDiscardsEvents : public TraceBuffer {
 public:
  virtual ~TraceBufferDiscardsEvents() { }

  virtual void AddEvent(const TraceEvent& event) OVERRIDE {}
  virtual bool HasMoreEvents() const OVERRIDE { return false; }

  virtual const TraceEvent& NextEvent() OVERRIDE {
    NOTREACHED();
    return *static_cast<TraceEvent*>(NULL);
  }

  virtual bool IsFull() const OVERRIDE { return false; }

  virtual size_t CountEnabledByName(
      const unsigned char* category,
      const std::string& event_name) const OVERRIDE {
    return 0;
  }

  virtual size_t Size() const OVERRIDE { return 0; }

  // As this buffer is never full, we can return any positive number.
  virtual size_t Capacity() const OVERRIDE { return 1; }

  virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
    NOTREACHED();
    return *static_cast<TraceEvent*>(NULL);
  }
};

}  // namespace

////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

namespace {

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }

// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}

}  // namespace

TraceEvent::TraceEvent()
    : id_(0u),
      category_group_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  arg_names_[0] = NULL;
  arg_names_[1] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));
}

TraceEvent::TraceEvent(
    int thread_id,
    TimeTicks timestamp,
    TimeTicks thread_timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    scoped_ptr<ConvertableToTraceFormat> convertable_values[],
    unsigned char flags)
    : timestamp_(timestamp),
      thread_timestamp_(thread_timestamp),
      id_(id),
      category_group_enabled_(category_group_enabled),
      name_(name),
      thread_id_(thread_id),
      phase_(phase),
      flags_(flags) {
  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i].reset(convertable_values[i].release());
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i].reset();
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }

  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (arg_is_copy[i])
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}
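
// Illustration (not authoritative): for a copied event with one COPY_STRING
// argument, the CopyTraceEventParameter calls above pack everything into the
// single parameter_copy_storage_ allocation sequentially:
//
//   | name '\0' | arg_names_[0] '\0' | arg_values_[0].as_string '\0' |
//
// and name_/arg_names_/arg_values_ are repointed into that block, so the
// event stays valid after the caller's strings go away.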

TraceEvent::TraceEvent(const TraceEvent& other)
    : timestamp_(other.timestamp_),
      thread_timestamp_(other.thread_timestamp_),
      id_(other.id_),
      category_group_enabled_(other.category_group_enabled_),
      name_(other.name_),
      thread_id_(other.thread_id_),
      phase_(other.phase_),
      flags_(other.flags_) {
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_values_[i] = other.arg_values_[i];
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
      convertable_values_[i].reset(
          const_cast<TraceEvent*>(&other)->convertable_values_[i].release());
    } else {
      convertable_values_[i].reset();
    }
  }
}

TraceEvent& TraceEvent::operator=(const TraceEvent& other) {
  if (this == &other)
    return *this;

  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  id_ = other.id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  parameter_copy_storage_ = other.parameter_copy_storage_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_values_[i] = other.arg_values_[i];
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
      convertable_values_[i].reset(
          const_cast<TraceEvent*>(&other)->convertable_values_[i].release());
    } else {
      convertable_values_[i].reset();
    }
  }
  return *this;
}

TraceEvent::~TraceEvent() {
}

// static
void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
                                   std::string* out) {
  std::string::size_type start_pos;
  switch (type) {
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
      break;
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
      break;
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
      break;
    case TRACE_VALUE_TYPE_DOUBLE:
      StringAppendF(out, "%f", value.as_double);
      break;
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                        reinterpret_cast<intptr_t>(value.as_pointer)));
      break;
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      *out += "\"";
      start_pos = out->size();
      *out += value.as_string ? value.as_string : "NULL";
      // Insert backslash before special characters for proper json format.
      while ((start_pos = out->find_first_of("\\\"", start_pos)) !=
             std::string::npos) {
        out->insert(start_pos, 1, '\\');
        // Skip inserted escape character and following character.
        start_pos += 2;
      }
      *out += "\"";
      break;
    default:
      NOTREACHED() << "Don't know how to print this value";
      break;
  }
}
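
// Example (hypothetical value): a COPY_STRING argument containing
//   say "hi"
// is emitted as
//   "say \"hi\""
// because the loop above inserts a '\' before every '"' or '\' found after
// start_pos, then skips past the escaped pair.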

void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
      TraceLog::GetCategoryGroupName(category_group_enabled_),
      process_id,
      thread_id_,
      time_int64,
      phase_,
      name_);

  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
    else
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";

  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
  }

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;
      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;
      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}

void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  *out << "]";
  if (arg_names_[0]) {
    *out << ", {";
    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out << ", ";
      *out << arg_names_[i] << ":";
      std::string value_as_text;

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

      *out << value_as_text;
    }
    *out << "}";
  }
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////

TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////

class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;

class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};

// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  virtual ~TraceSamplingThread();

  // Implementation of PlatformThread::Delegate:
  virtual void ThreadMain() OVERRIDE;

  static void DefaultSampleCallback(TraceBucketData* bucket_data);

  void Stop();
  void InstallWaitableEventForSamplingTesting(WaitableEvent* waitable_event);

 private:
  friend class TraceLog;

  void GetSamples();
  // Not thread-safe. Once the ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);

  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  scoped_ptr<CancellationFlag> cancellation_flag_;
  scoped_ptr<WaitableEvent> waitable_event_for_testing_;
};

TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false) {
  cancellation_flag_.reset(new CancellationFlag);
}

TraceSamplingThread::~TraceSamplingThread() {
}

void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_->IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    GetSamples();
    if (waitable_event_for_testing_.get())
      waitable_event_for_testing_->Signal();
  }
}

// static
void TraceSamplingThread::DefaultSampleCallback(TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}

void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}

void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}

// static
void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
                                                 const char** name) {
  *category = combined;
  *name = &combined[strlen(combined) + 1];
}
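
// Example (hypothetical values): the sampling state stores the category and
// name back to back in a single NUL-separated buffer:
//
//   const char combined[] = "gpu\0SwapBuffers";
//   const char* category;
//   const char* name;
//   TraceSamplingThread::ExtractCategoryAndName(combined, &category, &name);
//   // category points at "gpu", name points at "SwapBuffers".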

void TraceSamplingThread::Stop() {
  cancellation_flag_->Set();
}

void TraceSamplingThread::InstallWaitableEventForSamplingTesting(
    WaitableEvent* waitable_event) {
  waitable_event_for_testing_.reset(waitable_event);
}

TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceLog
//
////////////////////////////////////////////////////////////////////////////////

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  virtual ~ThreadLocalEventBuffer();

  void AddEvent(const TraceEvent& event, NotificationHelper* notifier);
  void ReportOverhead(const TimeTicks& event_timestamp,
                      const TimeTicks& event_thread_timestamp);

  // MessageLoop::DestructionObserver
  virtual void WillDestroyCurrentMessageLoop() OVERRIDE;

 private:
  void FlushWhileLocked(NotificationHelper* notifier);

  void CheckThisIsCurrentBuffer() {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  std::vector<TraceEvent> logged_events_;
  int event_count_;
  TimeDelta overhead_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};

TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      event_count_(0) {
  logged_events_.reserve(kTraceEventThreadLocalBufferSize);

  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);

  // Zero event_count_ happens in either of the following cases:
  // - no event generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    const char* arg_names[2] = { "event_count", "average_overhead" };
    unsigned char arg_types[2];
    unsigned long long arg_values[2];
    trace_event_internal::SetTraceValue(
        event_count_, &arg_types[0], &arg_values[0]);
    trace_event_internal::SetTraceValue(
        overhead_.InMillisecondsF() / event_count_,
        &arg_types[1], &arg_values[1]);
    logged_events_.push_back(TraceEvent(
        static_cast<int>(PlatformThread::CurrentId()),
        TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
        &g_category_group_enabled[g_category_metadata],
        "trace_event_overhead", trace_event_internal::kNoEventId,
        2, arg_names, arg_types, arg_values, NULL,
        TRACE_EVENT_FLAG_NONE));
  }

  NotificationHelper notifier(trace_log_);
  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked(&notifier);
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
  notifier.SendNotificationIfAny();
}

void TraceLog::ThreadLocalEventBuffer::AddEvent(const TraceEvent& event,
                                                NotificationHelper* notifier) {
  CheckThisIsCurrentBuffer();
  logged_events_.push_back(event);
  if (logged_events_.size() >= kTraceEventThreadLocalBufferSize) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked(notifier);
  }
}

void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TimeTicks& event_timestamp,
    const TimeTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  event_count_++;
  TimeTicks now =
      TimeTicks::NowFromSystemTraceTime() - trace_log_->time_offset_;
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    int thread_id = static_cast<int>(PlatformThread::CurrentId());
    // TODO(wangxianzhu): Use X event when it's ready.
    logged_events_.push_back(TraceEvent(
        thread_id, event_timestamp, event_thread_timestamp,
        TRACE_EVENT_PHASE_BEGIN,
        &g_category_group_enabled[g_category_trace_event_overhead],
        "overhead",
        0, 0, NULL, NULL, NULL, NULL, 0));
    logged_events_.push_back(TraceEvent(
        thread_id, now, ThreadNow(),
        TRACE_EVENT_PHASE_END,
        &g_category_group_enabled[g_category_trace_event_overhead],
        "overhead",
        0, 0, NULL, NULL, NULL, NULL, 0));
  }
  overhead_ += overhead;
}

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked(
    NotificationHelper* notifier) {
  trace_log_->lock_.AssertAcquired();
  for (size_t i = 0; i < logged_events_.size(); ++i) {
    trace_log_->AddEventToMainBufferWhileLocked(logged_events_[i]);
  }
  logged_events_.resize(0);
  trace_log_->CheckIfBufferIsFullWhileLocked(notifier);
}

TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log)
    : trace_log_(trace_log),
      notification_(0) {
}

TraceLog::NotificationHelper::~NotificationHelper() {
}

void TraceLog::NotificationHelper::AddNotificationWhileLocked(
    int notification) {
  trace_log_->lock_.AssertAcquired();
  if (trace_log_->notification_callback_.is_null())
    return;
  if (notification_ == 0)
    callback_copy_ = trace_log_->notification_callback_;
  notification_ |= notification;
}

void TraceLog::NotificationHelper::SendNotificationIfAny() {
  if (notification_)
    callback_copy_.Run(notification_);
}

// static
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}

// static
// Note, if you add more options here you also need to update:
// content/browser/devtools/devtools_tracing_handler:TraceOptionsFromString
TraceLog::Options TraceLog::TraceOptionsFromString(
    const std::string& options) {
  std::vector<std::string> split;
  base::SplitString(options, ',', &split);
  int ret = 0;
  for (std::vector<std::string>::iterator iter = split.begin();
       iter != split.end();
       ++iter) {
    if (*iter == kRecordUntilFull) {
      ret |= RECORD_UNTIL_FULL;
    } else if (*iter == kRecordContinuously) {
      ret |= RECORD_CONTINUOUSLY;
    } else if (*iter == kEnableSampling) {
      ret |= ENABLE_SAMPLING;
    } else {
      NOTREACHED();  // Unknown option provided.
    }
  }
  if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
    ret |= RECORD_UNTIL_FULL;  // Default when no options are specified.

  return static_cast<Options>(ret);
}
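
// Example (hypothetical usage):
//
//   TraceLog::Options options = TraceLog::TraceOptionsFromString(
//       "record-continuously,enable-sampling");
//   // options == (RECORD_CONTINUOUSLY | ENABLE_SAMPLING)
//
//   options = TraceLog::TraceOptionsFromString("enable-sampling");
//   // Neither record mode was named, so the default is added:
//   // options == (RECORD_UNTIL_FULL | ENABLE_SAMPLING)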

TraceLog::TraceLog()
    : enable_count_(0),
      num_traces_recorded_(0),
      buffer_is_full_(0),
      event_callback_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      watch_category_(0),
      trace_options_(RECORD_UNTIL_FULL),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
      flush_count_(0) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string category_string =
        CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
            switches::kTraceToConsole);

    if (category_string.empty())
      category_string = "*";

    SetEnabled(CategoryFilter(category_string), ECHO_TO_CONSOLE);
  }
#endif

  logged_events_.reset(GetTraceBuffer());
}

TraceLog::~TraceLog() {
}

const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}

const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}
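
// Example (hypothetical category): because the two arrays are parallel, an
// enabled-flag pointer maps back to its category name by pure index math:
//
//   const unsigned char* enabled =
//       TraceLog::GetCategoryGroupEnabled("renderer");
//   // enabled == &g_category_group_enabled[i] for some i, therefore:
//   const char* name = TraceLog::GetCategoryGroupName(enabled);
//   // name == "renderer"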

void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
  bool is_enabled = enable_count_ && category_filter_.IsCategoryGroupEnabled(
      g_category_groups[category_index]);
  SetCategoryGroupEnabled(category_index, is_enabled);
}

void TraceLog::UpdateCategoryGroupEnabledFlags() {
  for (int i = 0; i < g_category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}

void TraceLog::SetCategoryGroupEnabled(int category_index, bool is_enabled) {
  g_category_group_enabled[category_index] =
      is_enabled ? CATEGORY_GROUP_ENABLED : 0;

#if defined(OS_ANDROID)
  ApplyATraceEnabledFlag(&g_category_group_enabled[category_index]);
#endif
}

bool TraceLog::IsCategoryGroupEnabled(
    const unsigned char* category_group_enabled) {
  // On Android, ATrace and normal trace can be enabled independently.
  // This function checks if the normal trace is enabled.
  return *category_group_enabled & CATEGORY_GROUP_ENABLED;
}

const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  AutoLock lock(lock_);

  unsigned char* category_group_enabled = NULL;
  // Search for pre-existing category group.
  for (int i = 0; i < g_category_index; i++) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      category_group_enabled = &g_category_group_enabled[i];
      break;
    }
  }

  if (!category_group_enabled) {
    // Create a new category group.
    DCHECK(g_category_index < MAX_CATEGORY_GROUPS) <<
        "must increase MAX_CATEGORY_GROUPS";
    if (g_category_index < MAX_CATEGORY_GROUPS) {
      int new_index = g_category_index++;
      // Don't hold on to the category_group pointer, so that we can create
      // category groups with strings not known at compile time (this is
      // required by SetWatchEvent).
      const char* new_group = strdup(category_group);
      ANNOTATE_LEAKING_OBJECT_PTR(new_group);
      g_category_groups[new_index] = new_group;
      DCHECK(!g_category_group_enabled[new_index]);
      // Note that if both included and excluded patterns in the
      // CategoryFilter are empty, we exclude nothing,
      // thereby enabling this category group.
      UpdateCategoryGroupEnabledFlag(new_index);
      category_group_enabled = &g_category_group_enabled[new_index];
    } else {
      category_group_enabled =
          &g_category_group_enabled[g_category_categories_exhausted];
    }
  }
  return category_group_enabled;
}

void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  category_groups->push_back(
      g_category_groups[g_category_trace_event_overhead]);
  for (int i = g_num_builtin_categories; i < g_category_index; i++)
    category_groups->push_back(g_category_groups[i]);
}

void TraceLog::SetEnabled(const CategoryFilter& category_filter,
                          Options options) {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_message_loop_proxy_.get());

    Options old_options = trace_options();

    if (enable_count_++ > 0) {
      if (options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";
      }

      category_filter_.Merge(category_filter);
      UpdateCategoryGroupEnabledFlags();
      return;
    }

    if (options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, options);
      logged_events_.reset(GetTraceBuffer());
      subtle::NoBarrier_Store(&buffer_is_full_, 0);
    }

    if (dispatching_to_observer_list_) {
      DLOG(ERROR) <<
          "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    num_traces_recorded_++;

    category_filter_ = CategoryFilter(category_filter);
    UpdateCategoryGroupEnabledFlags();

    if (options & ENABLE_SAMPLING) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[0],
          "bucket0",
          Bind(&TraceSamplingThread::DefaultSampleCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[1],
          "bucket1",
          Bind(&TraceSamplingThread::DefaultSampleCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2],
          "bucket2",
          Bind(&TraceSamplingThread::DefaultSampleCallback));
      if (!PlatformThread::Create(
              0, sampling_thread_.get(), &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}

const CategoryFilter& TraceLog::GetCurrentCategoryFilter() {
  AutoLock lock(lock_);
  DCHECK(enable_count_ > 0);
  return category_filter_;
}

void TraceLog::SetDisabled() {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);
    DCHECK(enable_count_ > 0);

    if (--enable_count_ != 0)
      return;

    if (dispatching_to_observer_list_) {
      DLOG(ERROR)
          << "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    if (sampling_thread_.get()) {
      // Stop the sampling thread.
      sampling_thread_->Stop();
      lock_.Release();
      PlatformThread::Join(sampling_thread_handle_);
      lock_.Acquire();
      sampling_thread_handle_ = PlatformThreadHandle();
      sampling_thread_.reset();
    }

    category_filter_.Clear();
    subtle::NoBarrier_Store(&watch_category_, 0);
    watch_event_name_ = "";
    UpdateCategoryGroupEnabledFlags();
    AddMetadataEvents();

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }

  // Dispatch to observers outside the lock in case the observer triggers a
  // trace event.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogDisabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}

int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  if (enable_count_ == 0)
    return -1;
  return num_traces_recorded_;
}

void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  return it != enabled_state_observer_list_.end();
}

float TraceLog::GetBufferPercentFull() const {
  return static_cast<float>(static_cast<double>(logged_events_->Size()) /
                            logged_events_->Capacity());
}

void TraceLog::SetNotificationCallback(
    const TraceLog::NotificationCallback& cb) {
  AutoLock lock(lock_);
  notification_callback_ = cb;
}

TraceBuffer* TraceLog::GetTraceBuffer() {
  Options options = trace_options();
  if (options & RECORD_CONTINUOUSLY)
    return new TraceBufferRingBuffer();
  else if (options & ECHO_TO_CONSOLE)
    return new TraceBufferDiscardsEvents();
  return new TraceBufferVector();
}

void TraceLog::AddEventToMainBufferWhileLocked(const TraceEvent& trace_event) {
  // Don't check buffer_is_full_ because we want the remaining thread-local
  // events to be flushed into the main buffer with this method, otherwise
  // we may lose some early events of a thread that generates events sparsely.
  lock_.AssertAcquired();
  logged_events_->AddEvent(trace_event);
}

void TraceLog::CheckIfBufferIsFullWhileLocked(NotificationHelper* notifier) {
  lock_.AssertAcquired();
  if (!subtle::NoBarrier_Load(&buffer_is_full_) && logged_events_->IsFull()) {
    subtle::NoBarrier_Store(&buffer_is_full_,
                            static_cast<subtle::AtomicWord>(1));
    notifier->AddNotificationWhileLocked(TRACE_BUFFER_FULL);
  }
}

void TraceLog::SetEventCallback(EventCallback cb) {
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
}

// Flush() works as the following:
// 1. Flush() is called in threadA whose message loop is saved in
//    flush_message_loop_proxy_;
// 2. If thread_message_loops_ is not empty, threadA posts task to each message
//    loop to flush the thread local buffers; otherwise finish the flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main
//      buffer;
//    - The message loop will be removed from thread_message_loops_;
//      If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would:
    // - generate more trace events;
    // - deschedule the calling thread on some platforms, causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int flush_count;
  {
    AutoLock lock(lock_);
    flush_count = ++flush_count_;
    DCHECK(!flush_message_loop_proxy_.get());
    flush_message_loop_proxy_ = MessageLoopProxy::current();
    DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
    flush_output_callback_ = cb;

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        (*it)->PostTask(
            FROM_HERE,
            Bind(&TraceLog::FlushCurrentThread, Unretained(this),
                 flush_count));
      }
      flush_message_loop_proxy_->PostDelayedTask(
          FROM_HERE,
          Bind(&TraceLog::OnFlushTimeout, Unretained(this), flush_count),
          TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
      return;
    }
  }

  FinishFlush(flush_count);
}

void TraceLog::FinishFlush(int flush_count) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;

  {
    AutoLock lock(lock_);
    if (flush_count != flush_count_)
      return;

    previous_logged_events.swap(logged_events_);
    logged_events_.reset(GetTraceBuffer());
    subtle::NoBarrier_Store(&buffer_is_full_, 0);
    flush_message_loop_proxy_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();
  }

  if (flush_output_callback.is_null())
    return;

  bool has_more_events = previous_logged_events->HasMoreEvents();
  // The callback needs to be called at least once even if there are no
  // events, to let the caller know the flush has completed.
  do {
    scoped_refptr<RefCountedString> json_events_str_ptr =
        new RefCountedString();

    for (size_t i = 0; has_more_events && i < kTraceEventBatchSize; ++i) {
      if (i > 0)
        *(&(json_events_str_ptr->data())) += ",";

      previous_logged_events->NextEvent().AppendAsJSON(
          &(json_events_str_ptr->data()));

      has_more_events = previous_logged_events->HasMoreEvents();
    }
    flush_output_callback.Run(json_events_str_ptr, has_more_events);
  } while (has_more_events);
}

// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int flush_count) {
  {
    AutoLock lock(lock_);
    if (flush_count != flush_count_ || !flush_message_loop_proxy_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  delete thread_local_event_buffer_.Get();

  {
    AutoLock lock(lock_);
    if (flush_count != flush_count_ || !flush_message_loop_proxy_ ||
        thread_message_loops_.size())
      return;

    flush_message_loop_proxy_->PostTask(
        FROM_HERE,
        Bind(&TraceLog::FinishFlush, Unretained(this), flush_count));
  }
}

void TraceLog::OnFlushTimeout(int flush_count) {
  {
    AutoLock lock(lock_);
    if (flush_count != flush_count_ || !flush_message_loop_proxy_) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING) << thread_message_loops_.size() << " threads haven't"
                 << " finished flush in time. Discarding their remaining"
                 << " events.";
  }
  FinishFlush(flush_count);
}

void TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    scoped_ptr<ConvertableToTraceFormat> convertable_values[],
    unsigned char flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
  AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled, name, id,
                                        thread_id, now, num_args, arg_names,
                                        arg_types, arg_values,
                                        convertable_values, flags);
}

void TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    scoped_ptr<ConvertableToTraceFormat> convertable_values[],
    unsigned char flags) {
  DCHECK(name);

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

#if defined(OS_ANDROID)
  SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id,
               num_args, arg_names, arg_types, arg_values, convertable_values,
               flags);
#endif

  if (!IsCategoryGroupEnabled(category_group_enabled))
    return;

  TimeTicks now = timestamp - time_offset_;
  TimeTicks thread_now = ThreadNow();

  NotificationHelper notifier(this);

  ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, the trace events will be added into
  // the main buffer directly.
  if (MessageLoop::current()) {
    thread_local_event_buffer = thread_local_event_buffer_.Get();
    if (!thread_local_event_buffer) {
      thread_local_event_buffer = new ThreadLocalEventBuffer(this);
      thread_local_event_buffer_.Set(thread_local_event_buffer);
    }
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name = ThreadIdNameManager::GetInstance()->
        GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock lock(lock_);
      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

  if (!subtle::NoBarrier_Load(&buffer_is_full_)) {
    TraceEvent trace_event(thread_id, now, thread_now, phase,
                           category_group_enabled, name, id,
                           num_args, arg_names, arg_types, arg_values,
                           convertable_values, flags);

    if (thread_local_event_buffer) {
      thread_local_event_buffer->AddEvent(trace_event, &notifier);
    } else {
      AutoLock lock(lock_);
      AddEventToMainBufferWhileLocked(trace_event);
      CheckIfBufferIsFullWhileLocked(&notifier);
    }

    if (trace_options() & ECHO_TO_CONSOLE) {
      AutoLock lock(lock_);

      TimeDelta duration;
      if (phase == TRACE_EVENT_PHASE_END) {
        duration = timestamp - thread_event_start_times_[thread_id].top();
        thread_event_start_times_[thread_id].pop();
      }

      std::string thread_name = thread_names_[thread_id];
      if (thread_colors_.find(thread_name) == thread_colors_.end())
        thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

      std::ostringstream log;
      log << base::StringPrintf("%s: \x1b[0;3%dm",
                                thread_name.c_str(),
                                thread_colors_[thread_name]);

      size_t depth = 0;
      if (thread_event_start_times_.find(thread_id) !=
          thread_event_start_times_.end())
        depth = thread_event_start_times_[thread_id].size();

      for (size_t i = 0; i < depth; ++i)
        log << "| ";

      trace_event.AppendPrettyPrinted(&log);
      if (phase == TRACE_EVENT_PHASE_END)
        log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

      LOG(ERROR) << log.str() << "\x1b[0;m";

      if (phase == TRACE_EVENT_PHASE_BEGIN)
        thread_event_start_times_[thread_id].push(timestamp);
    }
  }

  if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
      &watch_category_)) == category_group_enabled) {
    AutoLock lock(lock_);
    if (watch_event_name_ == name)
      notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
  }

  notifier.SendNotificationIfAny();
  EventCallback event_callback = reinterpret_cast<EventCallback>(
      subtle::NoBarrier_Load(&event_callback_));
  if (event_callback) {
    event_callback(phase, category_group_enabled, name, id,
                   num_args, arg_names, arg_types, arg_values,
                   flags);
  }

  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  size_t notify_count = 0;
  {
    AutoLock lock(lock_);
    subtle::NoBarrier_Store(&watch_category_,
                            reinterpret_cast<subtle::AtomicWord>(category));
    watch_event_name_ = event_name;

    // First, search existing events for watch event because we want to catch
    // it even if it has already occurred.
    notify_count = logged_events_->CountEnabledByName(category, event_name);
  }  // release lock

  // Send notification for each event found.
  for (size_t i = 0; i < notify_count; ++i) {
    NotificationHelper notifier(this);
    lock_.Acquire();
    notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
    lock_.Release();
    notifier.SendNotificationIfAny();
  }
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
}

namespace {

template <typename T>
void AddMetadataEventToBuffer(
    TraceBuffer* logged_events,
    int thread_id,
    const char* metadata_name, const char* arg_name,
    const T& value) {
  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  logged_events->AddEvent(TraceEvent(
      thread_id,
      TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
      &g_category_group_enabled[g_category_metadata],
      metadata_name, trace_event_internal::kNoEventId,
      num_args, &arg_name, &arg_type, &arg_value, NULL,
      TRACE_EVENT_FLAG_NONE));
}

}  // namespace

void TraceLog::AddMetadataEvents() {
  lock_.AssertAcquired();

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    AddMetadataEventToBuffer(logged_events_.get(),
                             current_thread_id,
                             "process_sort_index", "sort_index",
                             process_sort_index_);
  }

  if (process_name_.size()) {
    AddMetadataEventToBuffer(logged_events_.get(),
                             current_thread_id,
                             "process_name", "name",
                             process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); it++) {
      labels.push_back(it->second);
    }
    AddMetadataEventToBuffer(logged_events_.get(),
                             current_thread_id,
                             "process_labels", "labels",
                             JoinString(labels, ','));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); it++) {
    if (it->second == 0)
      continue;
    AddMetadataEventToBuffer(logged_events_.get(),
                             it->first,
                             "thread_sort_index", "sort_index",
                             it->second);
  }

  // Thread names.
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); it++) {
    if (it->second.empty())
      continue;
    AddMetadataEventToBuffer(logged_events_.get(),
                             it->first,
                             "thread_name", "name",
                             it->second);
  }
}

void TraceLog::InstallWaitableEventForSamplingTesting(
    WaitableEvent* waitable_event) {
  sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event);
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create an FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}
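
// Worked example (hypothetical pid): the hash is effectively one
// FNV-1a-style round over the whole 64-bit pid, so for process_id == 1:
//
//   process_id_hash_ == (14695981039346656037ull ^ 1ull) * 1099511628211ull
//
// AddTraceEventWithThreadIdAndTimestamp() XORs this hash into ids carrying
// TRACE_EVENT_FLAG_MANGLE_ID so that ids from different processes collide
// less often.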

void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it = process_labels_.find(
      label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
    const std::string& str) {
  return str.empty() ||
         str.at(0) == ' ' ||
         str.at(str.length() - 1) == ' ';
}

bool CategoryFilter::DoesCategoryGroupContainCategory(
    const char* category_group,
    const char* category) const {
  DCHECK(category);
  CStringTokenizer category_group_tokens(
      category_group, category_group + strlen(category_group), ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
        category_group_token))
        << "Disallowed category string";
    if (MatchPattern(category_group_token.c_str(), category))
      return true;
  }
  return false;
}

CategoryFilter::CategoryFilter(const std::string& filter_string) {
  if (!filter_string.empty())
    Initialize(filter_string);
  else
    Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_) {
}

CategoryFilter::~CategoryFilter() {
}

CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  return *this;
}

void CategoryFilter::Initialize(const std::string& filter_string) {
  // Tokenize list of categories, delimited by ','.
  StringTokenizer tokens(filter_string, ",");
  // Add each token to the appropriate list (included_, disabled_, excluded_).
  while (tokens.GetNext()) {
    std::string category = tokens.token();
    // Ignore empty categories.
    if (category.empty())
      continue;
    // Excluded categories start with '-'.
    if (category.at(0) == '-') {
      // Remove '-' from category string.
      category = category.substr(1);
      excluded_.push_back(category);
    } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                TRACE_DISABLED_BY_DEFAULT("")) == 0) {
      disabled_.push_back(category);
    } else {
      included_.push_back(category);
    }
  }
}
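
// Example (hypothetical filter string): "cat1,-cat2,disabled-by-default-gpu"
// is tokenized into:
//
//   included_ == { "cat1" }
//   excluded_ == { "cat2" }   // the leading '-' is stripped
//   disabled_ == { "disabled-by-default-gpu" }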

void CategoryFilter::WriteString(const StringList& values,
                                 std::string* out,
                                 bool included) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = values.begin();
       ci != values.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
    ++token_cnt;
  }
}

std::string CategoryFilter::ToString() const {
  std::string filter_string;
  WriteString(included_, &filter_string, true);
  WriteString(disabled_, &filter_string, true);
  WriteString(excluded_, &filter_string, false);
  return filter_string;
}

bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as part of enabling/disabling
  // categories.
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include the disabled.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }
  if (DoesCategoryGroupContainCategory(category_group_name,
                                       TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }

  for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return false;
  }
  // If the category group is not excluded, and there are no included patterns
  // we consider this pattern enabled.
  return included_.empty();
}
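
// Example (hypothetical category groups) for the filter "cat1,-cat2":
//
//   IsCategoryGroupEnabled("cat1")       // true: matches an included pattern
//   IsCategoryGroupEnabled("cat2")       // false: matches an excluded pattern
//   IsCategoryGroupEnabled("cat1,cat2")  // true: the included check runs
//                                        // before the excluded check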

bool CategoryFilter::HasIncludedPatterns() const {
  return !included_.empty();
}

void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
  // Keep included patterns only if both filters have an included entry.
  // Otherwise, one of the filters was specifying "*" and we want to honour
  // the broadest filter.
  if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
    included_.insert(included_.end(),
                     nested_filter.included_.begin(),
                     nested_filter.included_.end());
  } else {
    included_.clear();
  }

  disabled_.insert(disabled_.end(),
                   nested_filter.disabled_.begin(),
                   nested_filter.disabled_.end());
  excluded_.insert(excluded_.end(),
                   nested_filter.excluded_.begin(),
                   nested_filter.excluded_.end());
}

void CategoryFilter::Clear() {
  included_.clear();
  disabled_.clear();
  excluded_.clear();
}

}  // namespace debug
}  // namespace base

namespace trace_event_internal {

ScopedTrace::ScopedTrace(
    TRACE_EVENT_API_ATOMIC_WORD* event_uid, const char* name) {
  category_group_enabled_ =
      reinterpret_cast<const unsigned char*>(TRACE_EVENT_API_ATOMIC_LOAD(
          *event_uid));
  if (!category_group_enabled_) {
    category_group_enabled_ = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("gpu");
    TRACE_EVENT_API_ATOMIC_STORE(
        *event_uid,
        reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(category_group_enabled_));
  }
  if (*category_group_enabled_) {
    name_ = name;
    TRACE_EVENT_API_ADD_TRACE_EVENT(
        TRACE_EVENT_PHASE_BEGIN,    // phase
        category_group_enabled_,    // category enabled
        name,                       // name
        0,                          // id
        0,                          // num_args
        NULL,                       // arg_names
        NULL,                       // arg_types
        NULL,                       // arg_values
        NULL,                       // convertable_values
        TRACE_EVENT_FLAG_NONE);     // flags
  } else {
    category_group_enabled_ = NULL;
  }
}

ScopedTrace::~ScopedTrace() {
  if (category_group_enabled_ && *category_group_enabled_) {
    TRACE_EVENT_API_ADD_TRACE_EVENT(
        TRACE_EVENT_PHASE_END,    // phase
        category_group_enabled_,  // category enabled
        name_,                    // name
        0,                        // id
        0,                        // num_args
        NULL,                     // arg_names
        NULL,                     // arg_types
        NULL,                     // arg_values
        NULL,                     // convertable values
        TRACE_EVENT_FLAG_NONE);   // flags
  }
}

}  // namespace trace_event_internal