// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/trace_event_impl.h"

#include <algorithm>

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/trace_event.h"
#include "base/debug/trace_event_synthetic_delay.h"
#include "base/float_util.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/time/time.h"

#if defined(OS_WIN)
#include "base/debug/trace_event_win.h"
#endif

class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::debug::TraceLog,
              LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
  }
};

// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
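// Each bucket holds an atomic word that the sampling macros point at a
// combined "category\0name" string; TraceSamplingThread (below) polls these
// buckets and turns each hit into a TRACE_EVENT_PHASE_SAMPLE event (see
// TraceSamplingThread::DefaultSamplingCallback).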

namespace base {
namespace debug {

namespace {

// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;

// String options that can be used to initialize TraceOptions.
const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kTraceToConsole[] = "trace-to-console";
const char kEnableSampling[] = "enable-sampling";
const char kEnableSystrace[] = "enable-systrace";

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const int kThreadFlushTimeoutMs = 3000;

// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";

const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";

#define MAX_CATEGORY_GROUPS 100

// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
  "toplevel",
  "tracing already shutdown",
  "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
  "__metadata",
  // For reporting trace_event overhead. For thread local event buffers only.
  "trace_event_overhead"};

// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };

// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
// Skip default categories.
base::subtle::AtomicWord g_category_index = g_num_builtin_categories;

// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;

TimeTicks ThreadNow() {
  return TimeTicks::IsThreadNowSupported() ?
      TimeTicks::ThreadNow() : TimeTicks();
}

class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }
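
  // recyclable_chunks_queue_ is a circular queue of chunk indices, allocated
  // with one spare slot (see queue_capacity()) so that a full queue can be
  // distinguished from an empty one; chunks are handed out from the head and
  // returned at the tail.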

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }

  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  virtual bool IsFull() const OVERRIDE {
    return false;
  }

  virtual size_t Size() const OVERRIDE {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  virtual size_t Capacity() const OVERRIDE {
    return max_chunks_ * kTraceBufferChunkSize;
  }

  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    if (chunks_.empty())
      return NULL;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
        queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.PassAs<TraceBuffer>();
  }

 private:
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    virtual const TraceBufferChunk* NextChunk() OVERRIDE {
      return current_iteration_index_ < chunks_.size() ?
          chunks_[current_iteration_index_++] : NULL;
    }

    virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBufferChunk>();
    }
    virtual void ReturnChunk(size_t index,
                             scoped_ptr<TraceBufferChunk>) OVERRIDE {
      NOTIMPLEMENTED();
    }
    virtual bool IsFull() const OVERRIDE { return false; }
    virtual size_t Size() const OVERRIDE { return 0; }
    virtual size_t Capacity() const OVERRIDE { return 0; }
    virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
      return NULL;
    }
    virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBuffer>();
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };

  bool QueueIsEmpty() const {
    return queue_head_ == queue_tail_;
  }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
        queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const {
    return QueueSize() == queue_capacity() - 1;
  }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
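
// Unlike TraceBufferRingBuffer, TraceBufferVector only grows: chunks are
// appended until max_chunks_ is reached, at which point IsFull() becomes true
// and TraceLog disables tracing (see CheckIfBufferIsFullWhileLocked()).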

class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We can not DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }

  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }

  virtual bool IsFull() const OVERRIDE {
    return chunks_.size() >= max_chunks_;
  }

  virtual size_t Size() const OVERRIDE {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  virtual size_t Capacity() const OVERRIDE {
    return max_chunks_ * kTraceBufferChunkSize;
  }

  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    NOTIMPLEMENTED();
    return scoped_ptr<TraceBuffer>();
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};
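
// Builds a TRACE_EVENT_PHASE_METADATA event carrying a single named argument,
// e.g. the "average_overhead" record that ThreadLocalEventBuffer's destructor
// emits below.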

template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name, const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(thread_id,
                          TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
                          &g_category_group_enabled[g_category_metadata],
                          metadata_name, ::trace_event_internal::kNoEventId,
                          num_args, &arg_name, &arg_type, &arg_value, NULL,
                          TRACE_EVENT_FLAG_NONE);
}

class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() {
    thread_local_boolean_->Set(false);
  }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};

}  // namespace

void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}

// A helper class that allows the lock to be acquired in the middle of the
// scope and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock& lock)
      : lock_(lock),
        locked_(false) {
  }

  ~OptionalAutoLock() {
    if (locked_)
      lock_.Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_.Acquire();
      locked_ = true;
    }
  }

 private:
  Lock& lock_;
  bool locked_;

  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};

// Use this function instead of TraceEventHandle constructor to keep the
// overhead of ScopedTracer (trace_event.h) constructor minimum.
void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_seq);
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
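
// The (chunk_seq, chunk_index, event_index) triple lets GetEventByHandle()
// verify that a chunk has not been recycled since the handle was created:
// a recycled chunk gets a fresh sequence number, so stale handles are
// rejected rather than pointing at someone else's event.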

////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

namespace {

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }

// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}

}  // namespace

TraceEvent::TraceEvent()
    : duration_(TimeDelta::FromInternalValue(-1)),
      id_(0u),
      category_group_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    arg_names_[i] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));
}

TraceEvent::~TraceEvent() {
}

void TraceEvent::CopyFrom(const TraceEvent& other) {
  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  duration_ = other.duration_;
  id_ = other.id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];
    arg_values_[i] = other.arg_values_[i];
    convertable_values_[i] = other.convertable_values_[i];
  }
}

void TraceEvent::Initialize(
    int thread_id,
    TimeTicks timestamp,
    TimeTicks thread_timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  timestamp_ = timestamp;
  thread_timestamp_ = thread_timestamp;
  duration_ = TimeDelta::FromInternalValue(-1);
  id_ = id;
  category_group_enabled_ = category_group_enabled;
  name_ = name;
  thread_id_ = thread_id;
  phase_ = phase;
  flags_ = flags;

  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i] = convertable_values[i];
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i] = NULL;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }

  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (arg_is_copy[i])
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}

void TraceEvent::Reset() {
  // Only reset fields that won't be initialized in Initialize(), or that may
  // hold references to other objects.
  duration_ = TimeDelta::FromInternalValue(-1);
  parameter_copy_storage_ = NULL;
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    convertable_values_[i] = NULL;
}

void TraceEvent::UpdateDuration(const TimeTicks& now,
                                const TimeTicks& thread_now) {
  DCHECK(duration_.ToInternalValue() == -1);
  duration_ = now - timestamp_;
  thread_duration_ = thread_now - thread_timestamp_;
}

// static
void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
                                   std::string* out) {
  switch (type) {
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
      break;
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
      break;
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
      break;
    case TRACE_VALUE_TYPE_DOUBLE: {
      // FIXME: base/json/json_writer.cc is using the same code,
      // should be made into a common method.
      std::string real;
      double val = value.as_double;
      if (IsFinite(val)) {
        real = DoubleToString(val);
        // Ensure that the number has a .0 if there's no decimal or 'e'. This
        // makes sure that when we read the JSON back, it's interpreted as a
        // real rather than an int.
        if (real.find('.') == std::string::npos &&
            real.find('e') == std::string::npos &&
            real.find('E') == std::string::npos) {
          real.append(".0");
        }
        // The JSON spec requires that non-integer values in the range (-1,1)
        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
        if (real[0] == '.') {
          real.insert(0, "0");
        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
          // "-.1" bad "-0.1" good
          real.insert(1, "0");
        }
      } else if (IsNaN(val)) {
        // The JSON spec doesn't allow NaN and Infinity (since these are
        // objects in EcmaScript). Use strings instead.
        real = "\"NaN\"";
      } else if (val < 0) {
        real = "\"-Infinity\"";
      } else {
        real = "\"Infinity\"";
      }
      StringAppendF(out, "%s", real.c_str());
      break;
    }
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                        reinterpret_cast<intptr_t>(value.as_pointer)));
      break;
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
      break;
    default:
      NOTREACHED() << "Don't know how to print this value";
      break;
  }
}

void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
      TraceLog::GetCategoryGroupName(category_group_enabled_),
      process_id,
      thread_id_,
      time_int64,
      phase_,
      name_);

  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
    else
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";

  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    int64 duration = duration_.ToInternalValue();
    if (duration != -1)
      StringAppendF(out, ",\"dur\":%" PRId64, duration);
    if (!thread_timestamp_.is_null()) {
      int64 thread_duration = thread_duration_.ToInternalValue();
      if (thread_duration != -1)
        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
    }
  }

  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
  }

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;
      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;
      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}
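
// Each call above emits one complete JSON object per event, in the trace
// format that chrome://tracing consumes; TraceResultBuffer (below) wraps a
// stream of such fragments in "[" ... "]" to form the final JSON array.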

void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  *out << "]";
  if (arg_names_[0]) {
    *out << ", {";
    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out << ", ";
      *out << arg_names_[i] << ":";
      std::string value_as_text;

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

      *out << value_as_text;
    }
    *out << "}";
  }
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////

TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////
class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;

class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};

// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  virtual ~TraceSamplingThread();

  // Implementation of PlatformThread::Delegate:
  virtual void ThreadMain() OVERRIDE;

  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  void Stop();
  void WaitSamplingEventForTesting();

 private:
  friend class TraceLog;

  void GetSamples();
  // Not thread-safe. Once the ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);

  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  CancellationFlag cancellation_flag_;
  WaitableEvent waitable_event_for_testing_;
};

TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(false, false) {
}

TraceSamplingThread::~TraceSamplingThread() {
}

void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_.IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    GetSamples();
    waitable_event_for_testing_.Signal();
  }
}

// static
void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}

void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}

void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  // Access to sample_buckets_ doesn't cause races with the sampling thread
  // that uses the sample_buckets_, because it is guaranteed that
  // RegisterSampleBucket is called before the sampling thread is created.
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}

// static
void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
                                                 const char** name) {
  *category = combined;
  *name = &combined[strlen(combined) + 1];
}

void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();
}

void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();
}

TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceOptions
//
////////////////////////////////////////////////////////////////////////////////

bool TraceOptions::SetFromString(const std::string& options_string) {
  record_mode = RECORD_UNTIL_FULL;
  enable_sampling = false;
  enable_systrace = false;

  std::vector<std::string> split;
  std::vector<std::string>::iterator iter;
  base::SplitString(options_string, ',', &split);
  for (iter = split.begin(); iter != split.end(); ++iter) {
    if (*iter == kRecordUntilFull) {
      record_mode = RECORD_UNTIL_FULL;
    } else if (*iter == kRecordContinuously) {
      record_mode = RECORD_CONTINUOUSLY;
    } else if (*iter == kTraceToConsole) {
      record_mode = ECHO_TO_CONSOLE;
    } else if (*iter == kRecordAsMuchAsPossible) {
      record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
    } else if (*iter == kEnableSampling) {
      enable_sampling = true;
    } else if (*iter == kEnableSystrace) {
      enable_systrace = true;
    } else {
      return false;
    }
  }
  return true;
}

std::string TraceOptions::ToString() const {
  std::string ret;
  switch (record_mode) {
    case RECORD_UNTIL_FULL:
      ret = kRecordUntilFull;
      break;
    case RECORD_CONTINUOUSLY:
      ret = kRecordContinuously;
      break;
    case ECHO_TO_CONSOLE:
      ret = kTraceToConsole;
      break;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      ret = kRecordAsMuchAsPossible;
      break;
    default:
      NOTREACHED();
  }
  if (enable_sampling)
    ret = ret + "," + kEnableSampling;
  if (enable_systrace)
    ret = ret + "," + kEnableSystrace;
  return ret;
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceLog
//
////////////////////////////////////////////////////////////////////////////////

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  virtual ~ThreadLocalEventBuffer();

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TimeTicks& event_timestamp,
                      const TimeTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  virtual void WillDestroyCurrentMessageLoop() OVERRIDE;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int event_count_;
  TimeDelta overhead_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};

TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      event_count_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);

  // Zero event_count_ happens in either of the following cases:
  // - no event generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);
  }

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}

TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}

void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TimeTicks& event_timestamp,
    const TimeTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  CheckThisIsCurrentBuffer();

  event_count_++;
  TimeTicks thread_now = ThreadNow();
  TimeTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
    if (trace_event) {
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()),
          event_timestamp, event_thread_timestamp,
          TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
      trace_event->UpdateDuration(now, thread_now);
    }
  }
  overhead_ += overhead;
}

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}

// static
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}

TraceLog::TraceLog()
    : mode_(DISABLED),
      num_traces_recorded_(0),
      event_callback_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      watch_category_(0),
      trace_options_(kInternalRecordUntilFull),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
      event_callback_category_filter_(
          CategoryFilter::kDefaultCategoryFilterString),
      thread_shared_chunk_index_(0),
      generation_(0) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(CategoryFilter(filter),
               RECORDING_MODE,
               TraceOptions(ECHO_TO_CONSOLE));
  }
#endif

  logged_events_.reset(CreateTraceBuffer());
}

TraceLog::~TraceLog() {
}

// static
const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}

const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}

void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
  g_category_group_enabled[category_index] = enabled_flag;
}

void TraceLog::UpdateCategoryGroupEnabledFlags() {
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = 0; i < category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}

void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
  ResetTraceEventSyntheticDelays();
  const CategoryFilter::StringList& delays =
      category_filter_.GetSyntheticDelayValues();
  CategoryFilter::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(
            TimeDelta::FromMicroseconds(target_duration * 1e6));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}

const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  // The g_category_groups is append only, avoid using a lock for the fast path.
  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);

  // Search for pre-existing category group.
  for (size_t i = 0; i < current_category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  unsigned char* category_group_enabled = NULL;
  // This is the slow path: the lock is not held in the case above, so more
  // than one thread could have reached here trying to add the same category.
  // Only hold the lock when actually appending a new category, and
  // check the category groups again.
  AutoLock lock(lock_);
  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
  for (size_t i = 0; i < category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  // Create a new category group.
  DCHECK(category_index < MAX_CATEGORY_GROUPS) <<
      "must increase MAX_CATEGORY_GROUPS";
  if (category_index < MAX_CATEGORY_GROUPS) {
    // Don't hold on to the category_group pointer, so that we can create
    // category groups with strings not known at compile time (this is
    // required by SetWatchEvent).
    const char* new_group = strdup(category_group);
    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
    g_category_groups[category_index] = new_group;
    DCHECK(!g_category_group_enabled[category_index]);
    // Note that if both included and excluded patterns in the
    // CategoryFilter are empty, we exclude nothing,
    // thereby enabling this category group.
    UpdateCategoryGroupEnabledFlag(category_index);
    category_group_enabled = &g_category_group_enabled[category_index];
    // Update the max index now.
    base::subtle::Release_Store(&g_category_index, category_index + 1);
  } else {
    category_group_enabled =
        &g_category_group_enabled[g_category_categories_exhausted];
  }
  return category_group_enabled;
}

void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  category_groups->push_back(
      g_category_groups[g_category_trace_event_overhead]);
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = g_num_builtin_categories; i < category_index; i++)
    category_groups->push_back(g_category_groups[i]);
}

void TraceLog::SetEnabled(const CategoryFilter& category_filter,
                          Mode mode,
                          const TraceOptions& options) {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_message_loop_proxy_.get());

    InternalTraceOptions new_options =
        GetInternalOptionsFromTraceOptions(options);

    InternalTraceOptions old_options = trace_options();

    if (IsEnabled()) {
      if (new_options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";
      }

      if (mode != mode_) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
      }

      category_filter_.Merge(category_filter);
      UpdateCategoryGroupEnabledFlags();
      return;
    }

    if (dispatching_to_observer_list_) {
      DLOG(ERROR) <<
          "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    mode_ = mode;

    if (new_options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, new_options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    category_filter_ = CategoryFilter(category_filter);
    UpdateCategoryGroupEnabledFlags();
    UpdateSyntheticDelaysFromCategoryFilter();

    if (new_options & kInternalEnableSampling) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[0],
          "bucket0",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[1],
          "bucket1",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2],
          "bucket2",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(
              0, sampling_thread_.get(), &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}

TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceOptions(
    const TraceOptions& options) {
  InternalTraceOptions ret =
      options.enable_sampling ? kInternalEnableSampling : kInternalNone;
  switch (options.record_mode) {
    case RECORD_UNTIL_FULL:
      return ret | kInternalRecordUntilFull;
    case RECORD_CONTINUOUSLY:
      return ret | kInternalRecordContinuously;
    case ECHO_TO_CONSOLE:
      return ret | kInternalEchoToConsole;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      return ret | kInternalRecordAsMuchAsPossible;
  }
  NOTREACHED();
  return kInternalNone;
}

CategoryFilter TraceLog::GetCurrentCategoryFilter() {
  AutoLock lock(lock_);
  return category_filter_;
}

TraceOptions TraceLog::GetCurrentTraceOptions() const {
  TraceOptions ret;
  InternalTraceOptions option = trace_options();
  ret.enable_sampling = (option & kInternalEnableSampling) != 0;
  if (option & kInternalRecordUntilFull)
    ret.record_mode = RECORD_UNTIL_FULL;
  else if (option & kInternalRecordContinuously)
    ret.record_mode = RECORD_CONTINUOUSLY;
  else if (option & kInternalEchoToConsole)
    ret.record_mode = ECHO_TO_CONSOLE;
  else if (option & kInternalRecordAsMuchAsPossible)
    ret.record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
  else
    NOTREACHED();
  return ret;
}

void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();
}

void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (!IsEnabled())
    return;

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  mode_ = DISABLED;

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
  }

  category_filter_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;

  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (size_t i = 0; i < observer_list.size(); ++i)
      observer_list[i]->OnTraceLogDisabled();
  }
  dispatching_to_observer_list_ = false;
}

int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  if (!IsEnabled())
    return -1;
  return num_traces_recorded_;
}

void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  return it != enabled_state_observer_list_.end();
}

float TraceLog::GetBufferPercentFull() const {
  AutoLock lock(lock_);
  return static_cast<float>(static_cast<double>(logged_events_->Size()) /
                            logged_events_->Capacity());
}

bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}

TraceBuffer* TraceLog::CreateTraceBuffer() {
  InternalTraceOptions options = trace_options();
  if (options & kInternalRecordContinuously)
    return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
    return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
  else if (options & kInternalEchoToConsole)
    return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
  else if (options & kInternalRecordAsMuchAsPossible)
    return CreateTraceBufferVectorOfSize(kTraceEventVectorBigBufferChunks);
  return CreateTraceBufferVectorOfSize(kTraceEventVectorBufferChunks);
}

TraceBuffer* TraceLog::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}

TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle, bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ = logged_events_->GetChunk(
        &thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}

void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked();
  }
}
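
// In the non-ring-buffer modes this is what implements RECORD_UNTIL_FULL:
// the first time the vector buffer reports IsFull(), tracing is disabled and
// the time of the event is recorded in buffer_limit_reached_timestamp_.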

void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
                                       EventCallback cb) {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_category_filter_ = category_filter;
  UpdateCategoryGroupEnabledFlags();
}

void TraceLog::SetEventCallbackDisabled() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_, 0);
  UpdateCategoryGroupEnabledFlags();
}

// Flush() works as the following:
// 1. Flush() is called in threadA whose message loop is saved in
//    flush_message_loop_proxy_;
// 2. If thread_message_loops_ is not empty, threadA posts task to each message
//    loop to flush the thread local buffers; otherwise finish the flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main
//      buffer;
//    - The message loop will be removed from thread_message_loops_;
//    If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int generation = this->generation();
  // Copy of thread_message_loops_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner> >
      thread_message_loop_task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_message_loop_proxy_.get());
    flush_message_loop_proxy_ = MessageLoopProxy::current();
    DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        thread_message_loop_task_runners.push_back((*it)->task_runner());
      }
    }
  }

  if (thread_message_loop_task_runners.size()) {
    for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
      thread_message_loop_task_runners[i]->PostTask(
          FROM_HERE,
          Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
    }
    flush_message_loop_proxy_->PostDelayedTask(
        FROM_HERE,
        Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(generation);
}

void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const TraceLog::OutputCallback& flush_output_callback) {
  if (flush_output_callback.is_null())
    return;

  // The callback needs to be called at least once even if there are no events
  // to let the caller know the completion of flush.
  bool has_more_events = true;
  do {
    scoped_refptr<RefCountedString> json_events_str_ptr =
        new RefCountedString();

    for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
      const TraceBufferChunk* chunk = logged_events->NextChunk();
      if (!chunk) {
        has_more_events = false;
        break;
      }
      for (size_t j = 0; j < chunk->size(); ++j) {
        if (i > 0 || j > 0)
          json_events_str_ptr->data().append(",");
        chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
      }
    }

    flush_output_callback.Run(json_events_str_ptr, has_more_events);
  } while (has_more_events);
}

void TraceLog::FinishFlush(int generation) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_message_loop_proxy_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}

// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get() ||
      thread_message_loops_.size())
    return;

  flush_message_loop_proxy_->PostTask(
      FROM_HERE,
      Bind(&TraceLog::FinishFlush, Unretained(this), generation));
}

void TraceLog::OnFlushTimeout(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING) <<
        "The following threads haven't finished flush in time. "
        "If this happens stably for some thread, please call "
        "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
        "the thread to avoid its trace events from being lost.";
    for (hash_set<MessageLoop*>::const_iterator it =
             thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  FinishFlush(generation);
}

void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  {
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}

void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
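
// Bumping generation_ invalidates everything that refers to the old buffer:
// ThreadLocalEventBuffers compare their cached generation via
// CheckGeneration() and are deleted and recreated on the next AddTraceEvent,
// and in-flight flushes are ignored by FinishFlush()/OnFlushTimeout().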

TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                               name, id, thread_id, now,
                                               num_args, arg_names,
                                               arg_types, arg_values,
                                               convertable_values, flags);
}
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(name);

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

  TimeTicks now = OffsetTimestamp(timestamp);
  TimeTicks thread_now = ThreadNow();

  ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or when the message loop may be
  // blocked, trace events are added to the main buffer directly.
  if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
    thread_local_event_buffer = thread_local_event_buffer_.Get();
    if (thread_local_event_buffer &&
        !CheckGeneration(thread_local_event_buffer->generation())) {
      delete thread_local_event_buffer;
      thread_local_event_buffer = NULL;
    }
    if (!thread_local_event_buffer) {
      thread_local_event_buffer = new ThreadLocalEventBuffer(this);
      thread_local_event_buffer_.Set(thread_local_event_buffer);
    }
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name =
        ThreadIdNameManager::GetInstance()->GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id, now, thread_now, phase,
                              category_group_enabled, name, id,
                              num_args, arg_names, arg_types, arg_values,
                              convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
          &watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();
    }
  }

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now,
                     phase == TRACE_EVENT_PHASE_COMPLETE ?
                         TRACE_EVENT_PHASE_BEGIN : phase,
                     category_group_enabled, name, id,
                     num_args, arg_names, arg_types, arg_values,
                     flags);
    }
  }

  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);

  return handle;
}
// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id = trace_event ?
      trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm",
                            thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
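
// The "\x1b[0;3%dm" escape written above selects one of six ANSI foreground
// colors (codes 31-36) keyed by thread name, and the trailing "\x1b[0;m"
// resets the attributes, so events from different threads are visually
// distinguishable in a terminal. A line for a thread assigned color 2 would
// look like (thread name, depth and timing are made up):
//   CrBrowserMain: \x1b[0;32m| | MessageLoop::RunTask (1.234 ms)\x1b[0;m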
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}
void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  TimeTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
                                              now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
                     TRACE_EVENT_FLAG_NONE);
    }
  }
}
void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}
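
// Illustrative use of the watch-event API (the category and event names
// here are made up): run a callback each time a matching event is recorded.
// The callback is invoked on whichever thread records the event, so it must
// be thread-safe.
//
//   TraceLog::GetInstance()->SetWatchEvent(
//       "gpu", "SwapBuffers", base::Bind(&OnSwapBuffersTraced));
//   ...
//   TraceLog::GetInstance()->CancelWatchEvent();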
void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
  InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                          0,
                          "num_cpus", "number",
                          base::SysInfo::NumberOfProcessors());
#endif

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_sort_index", "sort_index",
                            process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); it++) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_labels", "labels",
                            JoinString(labels, ','));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); it++) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); it++) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_name", "name",
                            it->second);
  }

  // If the buffer is full, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "trace_buffer_overflowed",
                            "overflowed_at_ts",
                            buffer_limit_reached_timestamp_);
  }
}
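
// The metadata events added above appear in the trace JSON as metadata
// ("M") phase records, schematically (field order may differ):
//   {"ph":"M","name":"process_name","args":{"name":...},...}
// Trace viewers use them to label processes and threads and to order them
// via the sort_index entries.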
void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}
TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer managed by the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq() ?
        thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}
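
// Lookup order in GetEventByHandleInternal above: (1) the calling thread's
// local buffer, which needs no lock; (2) the thread-shared chunk, under
// lock_; (3) the main buffer. A chunk_seq mismatch means the chunk has been
// recycled since the handle was issued, so NULL is returned instead of an
// unrelated, recycled event.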
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}
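
// Worked example of the hash above (the pid value is made up): for
// process_id_ == 1234,
//   process_id_hash_ = (14695981039346656037ull ^ 1234ull) * 1099511628211ull
// with the multiply wrapping mod 2^64. Events recorded with
// TRACE_EVENT_FLAG_MANGLE_ID XOR their id with this value (see
// AddTraceEventWithThreadIdAndTimestamp), keeping ids consistent within a
// process while making accidental cross-process collisions unlikely.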
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it = process_labels_.find(
      label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}
bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
    const std::string& str) {
  return str.empty() ||
         str.at(0) == ' ' ||
         str.at(str.length() - 1) == ' ';
}

bool CategoryFilter::DoesCategoryGroupContainCategory(
    const char* category_group,
    const char* category) const {
  DCHECK(category);
  CStringTokenizer category_group_tokens(
      category_group, category_group + strlen(category_group), ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
               category_group_token))
        << "Disallowed category string";
    if (MatchPattern(category_group_token.c_str(), category))
      return true;
  }
  return false;
}
CategoryFilter::CategoryFilter(const std::string& filter_string) {
  if (!filter_string.empty())
    Initialize(filter_string);
  else
    Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter() {
  Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_),
      delays_(cf.delays_) {
}

CategoryFilter::~CategoryFilter() {
}

CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  delays_ = rhs.delays_;
  return *this;
}
void CategoryFilter::Initialize(const std::string& filter_string) {
  // Tokenize list of categories, delimited by ','.
  StringTokenizer tokens(filter_string, ",");
  // Add each token to the appropriate list (included_, excluded_).
  while (tokens.GetNext()) {
    std::string category = tokens.token();
    // Ignore empty categories.
    if (category.empty())
      continue;
    // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
    if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
        category.at(category.size() - 1) == ')') {
      category = category.substr(
          strlen(kSyntheticDelayCategoryFilterPrefix),
          category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
      size_t name_length = category.find(';');
      if (name_length != std::string::npos && name_length > 0 &&
          name_length != category.size() - 1) {
        delays_.push_back(category);
      }
    } else if (category.at(0) == '-') {
      // Excluded categories start with '-'.
      // Remove '-' from category string.
      category = category.substr(1);
      excluded_.push_back(category);
    } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                TRACE_DISABLED_BY_DEFAULT("")) == 0) {
      disabled_.push_back(category);
    } else {
      included_.push_back(category);
    }
  }
}
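
// Parsing example (category names are made up): the filter string
//   "cat1,cat2,-cat3,disabled-by-default-cat4,DELAY(gpu.Swap;16)"
// yields included_ = {"cat1", "cat2"}, excluded_ = {"cat3"},
// disabled_ = {"disabled-by-default-cat4"} and delays_ = {"gpu.Swap;16"};
// the DELAY(...) wrapper is stripped before the entry is stored.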
void CategoryFilter::WriteString(const StringList& values,
                                 std::string* out,
                                 bool included) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = values.begin();
       ci != values.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
    ++token_cnt;
  }
}

void CategoryFilter::WriteString(const StringList& delays,
                                 std::string* out) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = delays.begin();
       ci != delays.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
                  ci->c_str());
    ++token_cnt;
  }
}
std::string CategoryFilter::ToString() const {
  std::string filter_string;
  WriteString(included_, &filter_string, true);
  WriteString(disabled_, &filter_string, true);
  WriteString(excluded_, &filter_string, false);
  WriteString(delays_, &filter_string);
  return filter_string;
}
bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as part of enabling/disabling
  // categories.
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include the disabled.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }
  if (DoesCategoryGroupContainCategory(category_group_name,
                                       TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }

  for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return false;
  }

  // If the category group is not excluded, and there are no included
  // patterns, we consider this category group enabled.
  return included_.empty();
}
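
// Precedence example (category names are made up): with the filter
// "-cat1,disabled-by-default-cat2", the group "cat1,cat3" is disabled by
// the exclusion, "disabled-by-default-cat2" is enabled by its explicit
// disabled- entry, and any other group is enabled because included_ is
// empty.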
bool CategoryFilter::HasIncludedPatterns() const {
  return !included_.empty();
}
void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
  // Keep included patterns only if both filters have an included entry.
  // Otherwise, one of the filters was specifying "*" and we want to honor
  // the broader filter.
  if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
    included_.insert(included_.end(),
                     nested_filter.included_.begin(),
                     nested_filter.included_.end());
  } else {
    included_.clear();
  }

  disabled_.insert(disabled_.end(),
                   nested_filter.disabled_.begin(),
                   nested_filter.disabled_.end());
  excluded_.insert(excluded_.end(),
                   nested_filter.excluded_.begin(),
                   nested_filter.excluded_.end());
  delays_.insert(delays_.end(),
                 nested_filter.delays_.begin(),
                 nested_filter.delays_.end());
}
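
// Merge example (illustrative): merging a filter with included_ = {"b"}
// into one with included_ = {"a"} yields included_ = {"a", "b"}, but if
// either side has no included patterns (an implicit "*"), included_ is
// cleared, since the union of "everything" with any include list is still
// everything. The disabled-, excluded and delay lists are simply
// concatenated.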
void CategoryFilter::Clear() {
  included_.clear();
  disabled_.clear();
  excluded_.clear();
  delays_.clear();
}

const CategoryFilter::StringList&
    CategoryFilter::GetSyntheticDelayValues() const {
  return delays_;
}

}  // namespace debug
}  // namespace base
namespace trace_event_internal {

ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group, const char* name) {
  // The single atom works because for now the category_group can only be
  // "gpu".
  DCHECK(strcmp(category_group, "gpu") == 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kNoEventId,
            static_cast<int>(base::PlatformThread::CurrentId()),
            base::TimeTicks::NowFromSystemTraceTime(),
            0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}

}  // namespace trace_event_internal