// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_event_impl.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"

#if defined(OS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#include "base/trace_event/trace_event_win.h"
#endif

class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::trace_event::TraceLog,
              LeakySingletonTraits<base::trace_event::TraceLog>>::OnExit(0);
  }
};

// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

namespace base {
namespace trace_event {

namespace {

// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;

// String options that can be used to initialize TraceOptions.
const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kTraceToConsole[] = "trace-to-console";
const char kEnableSampling[] = "enable-sampling";
const char kEnableSystrace[] = "enable-systrace";

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const int kThreadFlushTimeoutMs = 3000;

// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";

const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";

#define MAX_CATEGORY_GROUPS 100

// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
  "toplevel",
  "tracing already shutdown",
  "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
  "__metadata",
  // For reporting trace_event overhead. For thread local event buffers only.
  "trace_event_overhead"};

// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
// Skip default categories.
base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
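// Example of the pointer <-> index relationship described above (illustrative):
// given a pointer p = &g_category_group_enabled[i] handed out to the tracing
// macros, the owning category group name can be recovered as
// g_category_groups[p - g_category_group_enabled], which is what
// TraceLog::GetCategoryGroupName() below relies on.
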
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;

TimeTicks ThreadNow() {
  return TimeTicks::IsThreadNowSupported() ?
      TimeTicks::ThreadNow() : TimeTicks();
}

class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return NULL;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }

  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
        queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.Pass();
  }

 private:
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    const TraceBufferChunk* NextChunk() override {
      return current_iteration_index_ < chunks_.size() ?
          chunks_[current_iteration_index_++] : NULL;
    }

    scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBufferChunk>();
    }
    void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
      NOTIMPLEMENTED();
    }
    bool IsFull() const override { return false; }
    size_t Size() const override { return 0; }
    size_t Capacity() const override { return 0; }
    TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
      return NULL;
    }
    scoped_ptr<TraceBuffer> CloneForIteration() const override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBuffer>();
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };

  bool QueueIsEmpty() const {
    return queue_head_ == queue_tail_;
  }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
        queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const {
    return QueueSize() == queue_capacity() - 1;
  }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
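// Note on the recyclable-chunks queue above: it is a circular queue with
// queue_capacity() == max_chunks_ + 1 slots, so "head == tail" can only mean
// empty and "size == capacity - 1" can only mean full. For example
// (illustrative), with max_chunks_ == 4 the queue has 5 slots and holds at
// most 4 chunk indices, which is enough for every chunk to be recyclable at
// once.
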
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We can not DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }

  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    NOTIMPLEMENTED();
    return scoped_ptr<TraceBuffer>();
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name, const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(thread_id,
                          TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
                          &g_category_group_enabled[g_category_metadata],
                          metadata_name, ::trace_event_internal::kNoEventId,
                          num_args, &arg_name, &arg_type, &arg_value, NULL,
                          TRACE_EVENT_FLAG_NONE);
}

class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() {
    thread_local_boolean_->Set(false);
  }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};

}  // namespace

void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}

// A helper class that allows the lock to be acquired in the middle of the
// scope and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}

  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    }
  }

 private:
  Lock* lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};

// Use this function instead of TraceEventHandle constructor to keep the
// overhead of ScopedTracer (trace_event.h) constructor minimum.
void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_seq);
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
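// A TraceEventHandle therefore encodes (chunk_seq, chunk_index, event_index)
// with the two indices truncated to 16 bits (hence the DCHECKs above), while
// the 32-bit chunk_seq lets GetEventByHandle() detect that a chunk has been
// recycled and reject a stale handle.
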
////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

namespace {

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }

// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}

}  // namespace

TraceEvent::TraceEvent()
    : duration_(TimeDelta::FromInternalValue(-1)),
      id_(0u),
      category_group_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    arg_names_[i] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));
}

TraceEvent::~TraceEvent() {
}

void TraceEvent::CopyFrom(const TraceEvent& other) {
  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  duration_ = other.duration_;
  id_ = other.id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];
    arg_values_[i] = other.arg_values_[i];
    convertable_values_[i] = other.convertable_values_[i];
  }
}

void TraceEvent::Initialize(
    int thread_id,
    TimeTicks timestamp,
    TimeTicks thread_timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  timestamp_ = timestamp;
  thread_timestamp_ = thread_timestamp;
  duration_ = TimeDelta::FromInternalValue(-1);
  id_ = id;
  category_group_enabled_ = category_group_enabled;
  name_ = name;
  thread_id_ = thread_id;
  phase_ = phase;
  flags_ = flags;

  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i] = convertable_values[i];
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i] = NULL;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }

  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (arg_is_copy[i])
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}

void TraceEvent::Reset() {
  // Only reset fields that won't be initialized in Initialize(), or that may
  // hold references to other objects.
  duration_ = TimeDelta::FromInternalValue(-1);
  parameter_copy_storage_ = NULL;
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    convertable_values_[i] = NULL;
}

void TraceEvent::UpdateDuration(const TimeTicks& now,
                                const TimeTicks& thread_now) {
  DCHECK_EQ(duration_.ToInternalValue(), -1);
  duration_ = now - timestamp_;
  thread_duration_ = thread_now - thread_timestamp_;
}

void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
                                   std::string* out) {
  switch (type) {
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
      break;
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
      break;
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
      break;
    case TRACE_VALUE_TYPE_DOUBLE: {
      // FIXME: base/json/json_writer.cc is using the same code,
      //        should be made into a common method.
      std::string real;
      double val = value.as_double;
      if (std::isfinite(val)) {
        real = DoubleToString(val);
        // Ensure that the number has a .0 if there's no decimal or 'e'. This
        // makes sure that when we read the JSON back, it's interpreted as a
        // real rather than an int.
        if (real.find('.') == std::string::npos &&
            real.find('e') == std::string::npos &&
            real.find('E') == std::string::npos) {
          real.append(".0");
        }
        // The JSON spec requires that non-integer values in the range (-1,1)
        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
        if (real[0] == '.') {
          real.insert(0, "0");
        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
          // "-.1" bad "-0.1" good
          real.insert(1, "0");
        }
      } else if (std::isnan(val)) {
        // The JSON spec doesn't allow NaN and Infinity (since these are
        // objects in EcmaScript). Use strings instead.
        real = "\"NaN\"";
      } else if (val < 0) {
        real = "\"-Infinity\"";
      } else {
        real = "\"Infinity\"";
      }
      StringAppendF(out, "%s", real.c_str());
      break;
    }
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                                     reinterpret_cast<intptr_t>(
                                     value.as_pointer)));
      break;
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
      break;
    default:
      NOTREACHED() << "Don't know how to print this value";
      break;
  }
}
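// Illustrative results of the double formatting above: 3.0 is written as
// "3.0" (a ".0" is appended when DoubleToString() yields "3"), ".52" becomes
// "0.52", "-.1" becomes "-0.1", and NaN / +-Infinity are written as the JSON
// strings "NaN", "Infinity" and "-Infinity".
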
void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":{",
      process_id,
      thread_id_,
      time_int64,
      phase_,
      TraceLog::GetCategoryGroupName(category_group_enabled_),
      name_);

  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
    else
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";

  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    int64 duration = duration_.ToInternalValue();
    if (duration != -1)
      StringAppendF(out, ",\"dur\":%" PRId64, duration);
    if (!thread_timestamp_.is_null()) {
      int64 thread_duration = thread_duration_.ToInternalValue();
      if (thread_duration != -1)
        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
    }
  }

  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
  }

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;

      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;

      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}

void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  *out << "]";
  if (arg_names_[0]) {
    *out << ", {";
    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out << ", ";
      *out << arg_names_[i] << ":";
      std::string value_as_text;

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

      *out << value_as_text;
    }
    *out << "}";
  }
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////

TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
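// Usage sketch for TraceResultBuffer (illustrative): call SetOutputCallback(),
// then Start(), AddFragment() once per trace fragment, and finally Finish().
// With fragments "A" and "B" the callback receives "[", "A", ",", "B", "]"
// in turn, i.e. the concatenated output is the JSON array "[A,B]".
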
////////////////////////////////////////////////////////////////////////////////
//
// TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////

class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;

class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};

// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  ~TraceSamplingThread() override;

  // Implementation of PlatformThread::Delegate:
  void ThreadMain() override;

  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  void Stop();
  void WaitSamplingEventForTesting();

 private:
  friend class TraceLog;

  void GetSamples();
  // Not thread-safe. Once the ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);

  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  CancellationFlag cancellation_flag_;
  WaitableEvent waitable_event_for_testing_;
};

TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(false, false) {
}

TraceSamplingThread::~TraceSamplingThread() {
}

void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_.IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    GetSamples();
    waitable_event_for_testing_.Signal();
  }
}

void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}

void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}

void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  // Access to sample_buckets_ doesn't cause races with the sampling thread
  // that uses the sample_buckets_, because it is guaranteed that
  // RegisterSampleBucket is called before the sampling thread is created.
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}

void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
                                                 const char** name) {
  *category = combined;
  *name = &combined[strlen(combined) + 1];
}

void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();
}

void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();
}

TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceOptions
//
////////////////////////////////////////////////////////////////////////////////

bool TraceOptions::SetFromString(const std::string& options_string) {
  record_mode = RECORD_UNTIL_FULL;
  enable_sampling = false;
  enable_systrace = false;

  std::vector<std::string> split;
  std::vector<std::string>::iterator iter;
  base::SplitString(options_string, ',', &split);
  for (iter = split.begin(); iter != split.end(); ++iter) {
    if (*iter == kRecordUntilFull) {
      record_mode = RECORD_UNTIL_FULL;
    } else if (*iter == kRecordContinuously) {
      record_mode = RECORD_CONTINUOUSLY;
    } else if (*iter == kTraceToConsole) {
      record_mode = ECHO_TO_CONSOLE;
    } else if (*iter == kRecordAsMuchAsPossible) {
      record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
    } else if (*iter == kEnableSampling) {
      enable_sampling = true;
    } else if (*iter == kEnableSystrace) {
      enable_systrace = true;
    } else {
      return false;
    }
  }
  return true;
}

std::string TraceOptions::ToString() const {
  std::string ret;
  switch (record_mode) {
    case RECORD_UNTIL_FULL:
      ret = kRecordUntilFull;
      break;
    case RECORD_CONTINUOUSLY:
      ret = kRecordContinuously;
      break;
    case ECHO_TO_CONSOLE:
      ret = kTraceToConsole;
      break;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      ret = kRecordAsMuchAsPossible;
      break;
    default:
      NOTREACHED();
  }
  if (enable_sampling)
    ret = ret + "," + kEnableSampling;
  if (enable_systrace)
    ret = ret + "," + kEnableSystrace;
  return ret;
}
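// Example round trip for the two functions above (illustrative): the string
// "record-continuously,enable-sampling" parses to record_mode ==
// RECORD_CONTINUOUSLY with enable_sampling == true, and ToString() on that
// TraceOptions produces the same string again.
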
////////////////////////////////////////////////////////////////////////////////
//
// TraceLog
//
////////////////////////////////////////////////////////////////////////////////

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TimeTicks& event_timestamp,
                      const TimeTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int event_count_;
  TimeDelta overhead_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};

TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      event_count_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);

  // Zero event_count_ happens in either of the following cases:
  // - no event generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);
  }

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}

TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}

void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TimeTicks& event_timestamp,
    const TimeTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  CheckThisIsCurrentBuffer();

  event_count_++;
  TimeTicks thread_now = ThreadNow();
  TimeTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
    if (trace_event) {
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()),
          event_timestamp, event_thread_timestamp,
          TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
      trace_event->UpdateDuration(now, thread_now);
    }
  }
  overhead_ += overhead;
}

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}

TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {
}

TraceLogStatus::~TraceLogStatus() {
}

TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}

TraceLog::TraceLog()
    : mode_(DISABLED),
      num_traces_recorded_(0),
      event_callback_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      watch_category_(0),
      trace_options_(kInternalRecordUntilFull),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
      event_callback_category_filter_(
          CategoryFilter::kDefaultCategoryFilterString),
      thread_shared_chunk_index_(0),
      generation_(0),
      use_worker_thread_(false) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(CategoryFilter(filter),
               RECORDING_MODE,
               TraceOptions(ECHO_TO_CONSOLE));
  }
#endif

  logged_events_.reset(CreateTraceBuffer());
}

TraceLog::~TraceLog() {
}

const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}

const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}

void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
           category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
#if defined(OS_WIN)
  if (base::trace_event::TraceEventETWExport::isETWExportEnabled())
    enabled_flag |= ENABLED_FOR_ETW_EXPORT;
#endif

  g_category_group_enabled[category_index] = enabled_flag;
}

void TraceLog::UpdateCategoryGroupEnabledFlags() {
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = 0; i < category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}

void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
  ResetTraceEventSyntheticDelays();
  const CategoryFilter::StringList& delays =
      category_filter_.GetSyntheticDelayValues();
  CategoryFilter::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(TimeDelta::FromMicroseconds(
            static_cast<int64>(target_duration * 1e6)));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}
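// Each synthetic delay entry parsed above has the form (illustrative)
// "<delay name>;<seconds>[;static|oneshot|alternating]": the first token
// selects the TraceEventSyntheticDelay, a numeric token such as "0.016" sets
// a 16 ms target duration (seconds converted to microseconds above), and the
// keyword tokens select the delay mode.
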
const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  // The g_category_groups is append only, avoid using a lock for the fast path.
  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);

  // Search for pre-existing category group.
  for (size_t i = 0; i < current_category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  unsigned char* category_group_enabled = NULL;
  // This is the slow path: the lock is not held in the case above, so more
  // than one thread could have reached here trying to add the same category.
  // Only hold the lock when actually appending a new category, and
  // check the category groups again.
  AutoLock lock(lock_);
  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
  for (size_t i = 0; i < category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  // Create a new category group.
  DCHECK(category_index < MAX_CATEGORY_GROUPS) <<
      "must increase MAX_CATEGORY_GROUPS";
  if (category_index < MAX_CATEGORY_GROUPS) {
    // Don't hold on to the category_group pointer, so that we can create
    // category groups with strings not known at compile time (this is
    // required by SetWatchEvent).
    const char* new_group = strdup(category_group);
    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
    g_category_groups[category_index] = new_group;
    DCHECK(!g_category_group_enabled[category_index]);
    // Note that if both included and excluded patterns in the
    // CategoryFilter are empty, we exclude nothing,
    // thereby enabling this category group.
    UpdateCategoryGroupEnabledFlag(category_index);
    category_group_enabled = &g_category_group_enabled[category_index];
    // Update the max index now.
    base::subtle::Release_Store(&g_category_index, category_index + 1);
  } else {
    category_group_enabled =
        &g_category_group_enabled[g_category_categories_exhausted];
  }
  return category_group_enabled;
}

void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  category_groups->push_back(
      g_category_groups[g_category_trace_event_overhead]);
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = g_num_builtin_categories; i < category_index; i++)
    category_groups->push_back(g_category_groups[i]);
}

void TraceLog::SetEnabled(const CategoryFilter& category_filter,
                          Mode mode,
                          const TraceOptions& options) {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_message_loop_proxy_.get());

    InternalTraceOptions new_options =
        GetInternalOptionsFromTraceOptions(options);

    InternalTraceOptions old_options = trace_options();

    if (IsEnabled()) {
      if (new_options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";
      }

      if (mode != mode_) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
      }

      category_filter_.Merge(category_filter);
      UpdateCategoryGroupEnabledFlags();
      return;
    }

    if (dispatching_to_observer_list_) {
      DLOG(ERROR) <<
          "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    mode_ = mode;

    if (new_options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, new_options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    category_filter_ = CategoryFilter(category_filter);
    UpdateCategoryGroupEnabledFlags();
    UpdateSyntheticDelaysFromCategoryFilter();

    if (new_options & kInternalEnableSampling) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[0],
          "bucket0",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[1],
          "bucket1",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2],
          "bucket2",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(
              0, sampling_thread_.get(), &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}

TraceLog::GetInternalOptionsFromTraceOptions(
1484 const TraceOptions
& options
) {
1485 InternalTraceOptions ret
=
1486 options
.enable_sampling
? kInternalEnableSampling
: kInternalNone
;
1487 switch (options
.record_mode
) {
1488 case RECORD_UNTIL_FULL
:
1489 return ret
| kInternalRecordUntilFull
;
1490 case RECORD_CONTINUOUSLY
:
1491 return ret
| kInternalRecordContinuously
;
1492 case ECHO_TO_CONSOLE
:
1493 return ret
| kInternalEchoToConsole
;
1494 case RECORD_AS_MUCH_AS_POSSIBLE
:
1495 return ret
| kInternalRecordAsMuchAsPossible
;
1498 return kInternalNone
;
CategoryFilter TraceLog::GetCurrentCategoryFilter() {
  AutoLock lock(lock_);
  return category_filter_;
}

TraceOptions TraceLog::GetCurrentTraceOptions() const {
  TraceOptions ret;
  InternalTraceOptions option = trace_options();
  ret.enable_sampling = (option & kInternalEnableSampling) != 0;
  if (option & kInternalRecordUntilFull)
    ret.record_mode = RECORD_UNTIL_FULL;
  else if (option & kInternalRecordContinuously)
    ret.record_mode = RECORD_CONTINUOUSLY;
  else if (option & kInternalEchoToConsole)
    ret.record_mode = ECHO_TO_CONSOLE;
  else if (option & kInternalRecordAsMuchAsPossible)
    ret.record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
  else
    NOTREACHED();
  return ret;
}

void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();
}

void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (!IsEnabled())
    return;

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  mode_ = DISABLED;

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
  }

  category_filter_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;

  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (size_t i = 0; i < observer_list.size(); ++i)
      observer_list[i]->OnTraceLogDisabled();
  }
  dispatching_to_observer_list_ = false;
}

int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  if (!IsEnabled())
    return -1;
  return num_traces_recorded_;
}

void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  return it != enabled_state_observer_list_.end();
}

TraceLogStatus TraceLog::GetStatus() const {
  AutoLock lock(lock_);
  TraceLogStatus result;
  result.event_capacity = logged_events_->Capacity();
  result.event_count = logged_events_->Size();
  return result;
}

bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}

TraceBuffer* TraceLog::CreateTraceBuffer() {
  InternalTraceOptions options = trace_options();
  if (options & kInternalRecordContinuously)
    return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
    return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
  else if (options & kInternalEchoToConsole)
    return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
  else if (options & kInternalRecordAsMuchAsPossible)
    return CreateTraceBufferVectorOfSize(kTraceEventVectorBigBufferChunks);
  return CreateTraceBufferVectorOfSize(kTraceEventVectorBufferChunks);
}

TraceBuffer* TraceLog::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}

TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle, bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ = logged_events_->GetChunk(
        &thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}

void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked();
  }
}

void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
                                       EventCallback cb) {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_category_filter_ = category_filter;
  UpdateCategoryGroupEnabledFlags();
}

1677 AutoLock
lock(lock_
);
1678 subtle::NoBarrier_Store(&event_callback_
, 0);
1679 UpdateCategoryGroupEnabledFlags();
// Flush() works as the following:
// 1. Flush() is called in threadA whose message loop is saved in
//    flush_message_loop_proxy_;
// 2. If thread_message_loops_ is not empty, threadA posts task to each message
//    loop to flush the thread local buffers; otherwise finish the flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main buffer;
//    - The message loop will be removed from thread_message_loops_;
//    If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  use_worker_thread_ = use_worker_thread;
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int generation = this->generation();
  // Copy of thread_message_loops_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner> >
      thread_message_loop_task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_message_loop_proxy_.get());
    flush_message_loop_proxy_ = MessageLoopProxy::current();
    DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        thread_message_loop_task_runners.push_back((*it)->task_runner());
      }
    }
  }

  if (thread_message_loop_task_runners.size()) {
    for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
      thread_message_loop_task_runners[i]->PostTask(
          FROM_HERE,
          Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
    }
    flush_message_loop_proxy_->PostDelayedTask(
        FROM_HERE,
        Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(generation);
}
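// Typical use of Flush() (illustrative): the caller passes an OutputCallback
// taking (const scoped_refptr<RefCountedString>&, bool has_more_events); the
// callback is invoked one or more times with JSON fragments of the trace, and
// the final invocation has has_more_events == false (see
// ConvertTraceEventsToTraceFormat() below).
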
// Usually it runs on a different thread.
void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const TraceLog::OutputCallback& flush_output_callback) {
  if (flush_output_callback.is_null())
    return;

  // The callback need to be called at least once even if there is no events
  // to let the caller know the completion of flush.
  bool has_more_events = true;
  do {
    scoped_refptr<RefCountedString> json_events_str_ptr =
        new RefCountedString();

    while (json_events_str_ptr->size() < kTraceEventBufferSizeInBytes) {
      const TraceBufferChunk* chunk = logged_events->NextChunk();
      has_more_events = chunk != NULL;
      if (!chunk)
        break;
      for (size_t j = 0; j < chunk->size(); ++j) {
        if (json_events_str_ptr->size())
          json_events_str_ptr->data().append(",\n");
        chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
      }
    }
    flush_output_callback.Run(json_events_str_ptr, has_more_events);
  } while (has_more_events);
}

void TraceLog::FinishFlush(int generation) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_message_loop_proxy_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();
  }

  if (use_worker_thread_ &&
      WorkerPool::PostTask(
          FROM_HERE,
          Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
               Passed(&previous_logged_events),
               flush_output_callback),
          true)) {
    return;
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}

// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get() ||
      thread_message_loops_.size())
    return;

  flush_message_loop_proxy_->PostTask(
      FROM_HERE,
      Bind(&TraceLog::FinishFlush, Unretained(this), generation));
}

void TraceLog::OnFlushTimeout(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING) <<
        "The following threads haven't finished flush in time. "
        "If this happens stably for some thread, please call "
        "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
        "the thread to avoid its trace events from being lost.";
    for (hash_set<MessageLoop*>::const_iterator it =
             thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  FinishFlush(generation);
}

void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  {
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}


void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
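
// AddTraceEvent is the entry point typically reached from the TRACE_EVENT*
// macros when no explicit thread id or timestamp is supplied; it stamps the
// event with the current thread and NowFromSystemTraceTime() and forwards to
// AddTraceEventWithThreadIdAndTimestamp(). For example, a hypothetical
// TRACE_EVENT0("test_category", "DoWork") ends up here once its category
// group is enabled.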
TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                               name, id, thread_id, now,
                                               num_args, arg_names,
                                               arg_types, arg_values,
                                               convertable_values, flags);
}
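
// AddTraceEventWithThreadIdAndTimestamp does the real work of recording an
// event: it guards against re-entrancy, optionally mangles the id, picks the
// per-thread or shared buffer, records thread-name metadata, and finally
// dispatches to the console echo, watch-event and event-callback paths that
// are enabled for this category group.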
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(!timestamp.is_null());

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id = MangleEventId(id);

  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
  TimeTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ?
      OffsetNow() : offset_event_timestamp;
  TimeTicks thread_now = ThreadNow();

  ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or one whose message loop may be
  // blocked, the trace events are added to the main buffer directly.
  if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
    thread_local_event_buffer = thread_local_event_buffer_.Get();
    if (thread_local_event_buffer &&
        !CheckGeneration(thread_local_event_buffer->generation())) {
      delete thread_local_event_buffer;
      thread_local_event_buffer = NULL;
    }
    if (!thread_local_event_buffer) {
      thread_local_event_buffer = new ThreadLocalEventBuffer(this);
      thread_local_event_buffer_.Set(thread_local_event_buffer);
    }
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name =
        ThreadIdNameManager::GetInstance()->GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

#if defined(OS_WIN)
  // This is done sooner rather than later, to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already threadsafe.
  if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  num_args, arg_names, arg_types, arg_values,
                                  convertable_values);
#endif  // OS_WIN

  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
                              phase, category_group_enabled, name, id,
                              num_args, arg_names, arg_types, arg_values,
                              convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
          &watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();
    }
  }

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(offset_event_timestamp,
                     phase == TRACE_EVENT_PHASE_COMPLETE ?
                         TRACE_EVENT_PHASE_BEGIN : phase,
                     category_group_enabled, name, id,
                     num_args, arg_names, arg_types, arg_values,
                     flags);
    }
  }

  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);

  return handle;
}

// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id = trace_event ?
      trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm",
                            thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}
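
// UpdateTraceEventDuration closes a COMPLETE event that was previously added:
// it looks the event up by its handle, stamps the end time (and thread time),
// and then notifies the same echo-to-console and event-callback paths as
// AddTraceEventWithThreadIdAndTimestamp.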
void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  TimeTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
                                              now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
                     TRACE_EVENT_FLAG_NONE);
    }
  }
}

void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}
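
// Event ids are XORed with a per-process hash (see SetProcessID below) so that
// ids coming from different processes are unlikely to collide in a combined
// trace. For example, the same local id 0x1 yields different mangled values in
// two processes because their process_id_hash_ values differ.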
uint64 TraceLog::MangleEventId(uint64 id) {
  return id ^ process_id_hash_;
}
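
// Emits the per-process and per-thread metadata events (cpu count, process
// name/labels/sort index, thread names/sort indices, and a buffer-overflow
// marker) into the shared chunk. Must be called with lock_ held.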
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
  InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                          0,
                          "num_cpus", "number",
                          base::SysInfo::NumberOfProcessors());
#endif

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_sort_index", "sort_index",
                            process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); ++it) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_labels", "labels",
                            JoinString(labels, ','));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); ++it) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); ++it) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_name", "name",
                            it->second);
  }

  // If the buffer is full, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "trace_buffer_overflowed",
                            "overflowed_at_ts",
                            buffer_limit_reached_timestamp_);
  }
}

void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event has gone out of the control of the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq() ?
        thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}
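
// SetProcessID also derives process_id_hash_, the value MangleEventId() XORs
// into event ids. It performs a single FNV-style round over the pid (XOR with
// the 64-bit offset basis, then multiply by the FNV prime).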
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}

void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it = process_labels_.find(
      label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}

bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
    const std::string& str) {
  return str.empty() ||
         str.at(0) == ' ' ||
         str.at(str.length() - 1) == ' ';
}

CategoryFilter::CategoryFilter(const std::string& filter_string) {
  if (!filter_string.empty())
    Initialize(filter_string);
  else
    Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter() {
  Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_),
      delays_(cf.delays_) {
}

CategoryFilter::~CategoryFilter() {
}

CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  delays_ = rhs.delays_;
  return *this;
}
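
// Initialize parses a comma-separated filter string into the four lists. As a
// hypothetical example, the string
//   "cat1,-cat2,disabled-by-default-cat3,DELAY(cat4.SomeDelay;16)"
// would put "cat1" in included_, "cat2" in excluded_, the disabled-by-default
// entry in disabled_, and "cat4.SomeDelay;16" in delays_.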
void CategoryFilter::Initialize(const std::string& filter_string) {
  // Tokenize list of categories, delimited by ','.
  StringTokenizer tokens(filter_string, ",");
  // Add each token to the appropriate list (included_, excluded_).
  while (tokens.GetNext()) {
    std::string category = tokens.token();
    // Ignore empty categories.
    if (category.empty())
      continue;
    // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
    if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
        category.at(category.size() - 1) == ')') {
      category = category.substr(
          strlen(kSyntheticDelayCategoryFilterPrefix),
          category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
      size_t name_length = category.find(';');
      if (name_length != std::string::npos && name_length > 0 &&
          name_length != category.size() - 1) {
        delays_.push_back(category);
      }
    } else if (category.at(0) == '-') {
      // Excluded categories start with '-'.
      // Remove '-' from category string.
      category = category.substr(1);
      excluded_.push_back(category);
    } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                TRACE_DISABLED_BY_DEFAULT("")) == 0) {
      disabled_.push_back(category);
    } else {
      included_.push_back(category);
    }
  }
}

void CategoryFilter::WriteString(const StringList& values,
                                 std::string* out,
                                 bool included) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = values.begin();
       ci != values.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
    ++token_cnt;
  }
}

void CategoryFilter::WriteString(const StringList& delays,
                                 std::string* out) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = delays.begin();
       ci != delays.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
                  ci->c_str());
    ++token_cnt;
  }
}

std::string CategoryFilter::ToString() const {
  std::string filter_string;
  WriteString(included_, &filter_string, true);
  WriteString(disabled_, &filter_string, true);
  WriteString(excluded_, &filter_string, false);
  WriteString(delays_, &filter_string);
  return filter_string;
}
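
// IsCategoryGroupEnabled evaluates a comma-separated category group against
// the filter in two passes: first it returns true if any token is explicitly
// enabled, then it checks whether every token is excluded. For example, with
// an exclusion filter of "-cat2", a hypothetical group "cat1,cat2" is still
// enabled because "cat1" is not excluded, while the group "cat2" alone is not.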
bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as part of enabling/disabling
  // categories.

  bool had_enabled_by_default = false;
  DCHECK(category_group_name);
  CStringTokenizer category_group_tokens(
      category_group_name, category_group_name + strlen(category_group_name),
      ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
               category_group_token))
        << "Disallowed category string";
    if (IsCategoryEnabled(category_group_token.c_str())) {
      return true;
    }
    if (!MatchPattern(category_group_token.c_str(),
                      TRACE_DISABLED_BY_DEFAULT("*")))
      had_enabled_by_default = true;
  }
  // Do a second pass to check for explicitly disabled categories
  // (those explicitly enabled have priority due to first pass).
  category_group_tokens.Reset();
  bool category_group_disabled = false;
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    for (StringList::const_iterator ci = excluded_.begin();
         ci != excluded_.end(); ++ci) {
      if (MatchPattern(category_group_token.c_str(), ci->c_str())) {
        // The current token of category_group_name is present in the excluded
        // list. Flag the exclusion and proceed further to check whether any of
        // the remaining categories of category_group_name are absent from the
        // excluded_ list.
        category_group_disabled = true;
        break;
      }
      // This category of category_group_name is not present in the excluded_
      // list, so it must belong to the included_ list. Enable the
      // category_group_name for recording.
      category_group_disabled = false;
    }
    // One of the categories present in category_group_name is not in the
    // excluded_ list, which implies this category group can be enabled for
    // recording, since one of its categories is enabled.
    if (!category_group_disabled)
      break;
  }
  // If the category group is not excluded, and there are no included patterns,
  // we consider this category group enabled, as long as it had categories
  // other than disabled-by-default.
  return !category_group_disabled &&
         included_.empty() && had_enabled_by_default;
}

bool CategoryFilter::IsCategoryEnabled(const char* category_name) const {
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include the disabled.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (MatchPattern(category_name, ci->c_str()))
      return true;
  }

  if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (MatchPattern(category_name, ci->c_str()))
      return true;
  }

  return false;
}

bool CategoryFilter::HasIncludedPatterns() const {
  return !included_.empty();
}

void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
  // Keep included patterns only if both filters have an included entry.
  // Otherwise, one of the filters was specifying "*" and we want to honour
  // the broadest filter.
  if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
    included_.insert(included_.end(),
                     nested_filter.included_.begin(),
                     nested_filter.included_.end());
  } else {
    included_.clear();
  }

  disabled_.insert(disabled_.end(),
                   nested_filter.disabled_.begin(),
                   nested_filter.disabled_.end());
  excluded_.insert(excluded_.end(),
                   nested_filter.excluded_.begin(),
                   nested_filter.excluded_.end());
  delays_.insert(delays_.end(),
                 nested_filter.delays_.begin(),
                 nested_filter.delays_.end());
}

void CategoryFilter::Clear() {
  included_.clear();
  disabled_.clear();
  excluded_.clear();
}

const CategoryFilter::StringList&
    CategoryFilter::GetSyntheticDelayValues() const {
  return delays_;
}

}  // namespace trace_event
}  // namespace base

namespace trace_event_internal {
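
// ScopedTraceBinaryEfficient emits a COMPLETE event for its scope with minimal
// overhead: the constructor records the begin via
// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP and the
// destructor closes it by updating the event's duration through the saved
// handle.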
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group, const char* name) {
  // The single atom works because for now the category_group can only be "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kNoEventId,
            static_cast<int>(base::PlatformThread::CurrentId()),
            base::TimeTicks::NowFromSystemTraceTime(),
            0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}

}  // namespace trace_event_internal