// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_event_impl.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/float_util.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "base/trace_event/trace_event_win.h"
class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::trace_event::TraceLog,
              LeakySingletonTraits<base::trace_event::TraceLog>>::OnExit(0);
  }
};
// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

namespace base {
namespace trace_event {
// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;
// String options that can be used to initialize TraceOptions.
const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kTraceToConsole[] = "trace-to-console";
const char kEnableSampling[] = "enable-sampling";
const char kEnableSystrace[] = "enable-systrace";
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;
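// Note: TraceBuffer capacity is reported as max_chunks * kTraceBufferChunkSize
// events (see the Capacity() implementations below), so the numerators above
// are, in effect, the approximate number of events each buffer can hold
// (e.g. roughly 256000 events for the regular vector buffer), independent of
// the chunk size.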
const int kThreadFlushTimeoutMs = 3000;
// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";

const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";

#define MAX_CATEGORY_GROUPS 100
// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
  "toplevel",
  "tracing already shutdown",
  "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
  "__metadata",
  // For reporting trace_event overhead. For thread local event buffers only.
  "trace_event_overhead"};
// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
// Skip default categories.
base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
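// Illustration (this mirrors what GetCategoryGroupName() below does): given a
// pointer |enabled| obtained from GetCategoryGroupEnabled(), the category name
// is recovered with pointer arithmetic rather than a lookup table:
//   size_t index = enabled - g_category_group_enabled;
//   const char* name = g_category_groups[index];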
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
TimeTicks ThreadNow() {
  return TimeTicks::IsThreadNowSupported() ?
      TimeTicks::ThreadNow() : TimeTicks();
}
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }
  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }
  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }
  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }
  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }
  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }
  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
        queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.Pass();
  }
 private:
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    const TraceBufferChunk* NextChunk() override {
      return current_iteration_index_ < chunks_.size() ?
          chunks_[current_iteration_index_++] : NULL;
    }

    scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
      return scoped_ptr<TraceBufferChunk>();
    }
    void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
    }
    bool IsFull() const override { return false; }
    size_t Size() const override { return 0; }
    size_t Capacity() const override { return 0; }
    TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
      return NULL;
    }
    scoped_ptr<TraceBuffer> CloneForIteration() const override {
      return scoped_ptr<TraceBuffer>();
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };
  bool QueueIsEmpty() const {
    return queue_head_ == queue_tail_;
  }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
        queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const {
    return QueueSize() == queue_capacity() - 1;
  }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }
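  // Example of the one-slot-gap convention used by queue_capacity() and
  // QueueIsFull(): with max_chunks_ == 4 the queue array has 5 slots, the
  // queue is "empty" when queue_head_ == queue_tail_ and "full" when it holds
  // 4 indices, so a full queue is never confused with an empty one.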
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }
  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We can not DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }
  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }
  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }
  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }
  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }
  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    return scoped_ptr<TraceBuffer>();
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};
template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name, const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(thread_id,
                          TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
                          &g_category_group_enabled[g_category_metadata],
                          metadata_name, ::trace_event_internal::kNoEventId,
                          num_args, &arg_name, &arg_type, &arg_value, NULL,
                          TRACE_EVENT_FLAG_NONE);
}
class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() {
    thread_local_boolean_->Set(false);
  }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};
void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  *event_index = next_free_++;
  return &chunk_[*event_index];
}
scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}
// A helper class that allows the lock to be acquired in the middle of the
// scope and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock& lock)
      : lock_(lock),
        locked_(false) {
  }

  ~OptionalAutoLock() {
    if (locked_)
      lock_.Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_.Acquire();
      locked_ = true;
    }
  }

 private:
  Lock& lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};
// Use this function instead of the TraceEventHandle constructor to keep the
// overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
////////////////////////////////////////////////////////////////////////////////
//
//    TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

namespace {

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }

// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}

}  // namespace
TraceEvent::TraceEvent()
    : duration_(TimeDelta::FromInternalValue(-1)),
      id_(0u),
      category_group_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    arg_names_[i] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));
}

TraceEvent::~TraceEvent() {
}
void TraceEvent::CopyFrom(const TraceEvent& other) {
  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  duration_ = other.duration_;
  id_ = other.id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];
    arg_values_[i] = other.arg_values_[i];
    convertable_values_[i] = other.convertable_values_[i];
  }
}
void TraceEvent::Initialize(
    int thread_id,
    TimeTicks timestamp,
    TimeTicks thread_timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  timestamp_ = timestamp;
  thread_timestamp_ = thread_timestamp;
  duration_ = TimeDelta::FromInternalValue(-1);
  id_ = id;
  category_group_enabled_ = category_group_enabled;
  name_ = name;
  thread_id_ = thread_id;
  phase_ = phase;
  flags_ = flags;

  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i] = convertable_values[i];
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i] = NULL;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }

  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (arg_is_copy[i])
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}
void TraceEvent::Reset() {
  // Only reset fields that won't be initialized in Initialize(), or that may
  // hold references to other objects.
  duration_ = TimeDelta::FromInternalValue(-1);
  parameter_copy_storage_ = NULL;
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    convertable_values_[i] = NULL;
}
void TraceEvent::UpdateDuration(const TimeTicks& now,
                                const TimeTicks& thread_now) {
  DCHECK(duration_.ToInternalValue() == -1);
  duration_ = now - timestamp_;
  thread_duration_ = thread_now - thread_timestamp_;
}
void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
                                   std::string* out) {
  switch (type) {
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
      break;
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
      break;
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
      break;
    case TRACE_VALUE_TYPE_DOUBLE: {
      // FIXME: base/json/json_writer.cc is using the same code,
      //        should be made into a common method.
      std::string real;
      double val = value.as_double;
      if (IsFinite(val)) {
        real = DoubleToString(val);
        // Ensure that the number has a .0 if there's no decimal or 'e'. This
        // makes sure that when we read the JSON back, it's interpreted as a
        // real rather than an int.
        if (real.find('.') == std::string::npos &&
            real.find('e') == std::string::npos &&
            real.find('E') == std::string::npos) {
          real.append(".0");
        }
        // The JSON spec requires that non-integer values in the range (-1,1)
        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
        if (real[0] == '.') {
          real.insert(0, "0");
        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
          // "-.1" bad "-0.1" good
          real.insert(1, "0");
        }
      } else if (IsNaN(val)) {
        // The JSON spec doesn't allow NaN and Infinity (since these are
        // objects in EcmaScript). Use strings instead.
        real = "\"NaN\"";
      } else if (val < 0) {
        real = "\"-Infinity\"";
      } else {
        real = "\"Infinity\"";
      }
      StringAppendF(out, "%s", real.c_str());
      break;
    }
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                                     reinterpret_cast<intptr_t>(
                                     value.as_pointer)));
      break;
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
      break;
    default:
      NOTREACHED() << "Don't know how to print this value";
      break;
  }
}
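// Examples of the double formatting above: a value that would print as "1"
// gains a trailing ".0" ("1.0"), ".52" gains a leading zero ("0.52"), "-.1"
// becomes "-0.1", and NaN/Infinity are emitted as the quoted strings "NaN",
// "Infinity" and "-Infinity" because bare NaN/Infinity are not valid JSON.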
void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":{",
      process_id,
      thread_id_,
      time_int64,
      phase_,
      TraceLog::GetCategoryGroupName(category_group_enabled_),
      name_);

  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
    else
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";

  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    int64 duration = duration_.ToInternalValue();
    if (duration != -1)
      StringAppendF(out, ",\"dur\":%" PRId64, duration);
    if (!thread_timestamp_.is_null()) {
      int64 thread_duration = thread_duration_.ToInternalValue();
      if (thread_duration != -1)
        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
    }
  }

  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
  }

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;
      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;
      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}
void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  *out << "]";
  if (arg_names_[0]) {
    *out << ", {";
    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out << ", ";
      *out << arg_names_[i] << ":";
      std::string value_as_text;

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

      *out << value_as_text;
    }
    *out << "}";
  }
}
////////////////////////////////////////////////////////////////////////////////
//
//    TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////

TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}
void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
////////////////////////////////////////////////////////////////////////////////
//
//    TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////
class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};
// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  ~TraceSamplingThread() override;

  // Implementation of PlatformThread::Delegate:
  void ThreadMain() override;

  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  void Stop();
  void WaitSamplingEventForTesting();

 private:
  friend class TraceLog;

  void GetSamples();
  // Not thread-safe. Once the ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);
  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  CancellationFlag cancellation_flag_;
  WaitableEvent waitable_event_for_testing_;
};
TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(false, false) {
}

TraceSamplingThread::~TraceSamplingThread() {
}
void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_.IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    GetSamples();
    waitable_event_for_testing_.Signal();
  }
}
void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}
void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}
void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  // Access to sample_buckets_ doesn't cause races with the sampling thread
  // that uses the sample_buckets_, because it is guaranteed that
  // RegisterSampleBucket is called before the sampling thread is created.
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}
void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
                                                 const char** name) {
  *category = combined;
  *name = &combined[strlen(combined) + 1];
}

void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();
}

void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();
}
TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}
////////////////////////////////////////////////////////////////////////////////
//
//    TraceOptions
//
////////////////////////////////////////////////////////////////////////////////

bool TraceOptions::SetFromString(const std::string& options_string) {
  record_mode = RECORD_UNTIL_FULL;
  enable_sampling = false;
  enable_systrace = false;

  std::vector<std::string> split;
  std::vector<std::string>::iterator iter;
  base::SplitString(options_string, ',', &split);
  for (iter = split.begin(); iter != split.end(); ++iter) {
    if (*iter == kRecordUntilFull) {
      record_mode = RECORD_UNTIL_FULL;
    } else if (*iter == kRecordContinuously) {
      record_mode = RECORD_CONTINUOUSLY;
    } else if (*iter == kTraceToConsole) {
      record_mode = ECHO_TO_CONSOLE;
    } else if (*iter == kRecordAsMuchAsPossible) {
      record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
    } else if (*iter == kEnableSampling) {
      enable_sampling = true;
    } else if (*iter == kEnableSystrace) {
      enable_systrace = true;
    } else {
      return false;
    }
  }
  return true;
}
std::string TraceOptions::ToString() const {
  std::string ret;
  switch (record_mode) {
    case RECORD_UNTIL_FULL:
      ret = kRecordUntilFull;
      break;
    case RECORD_CONTINUOUSLY:
      ret = kRecordContinuously;
      break;
    case ECHO_TO_CONSOLE:
      ret = kTraceToConsole;
      break;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      ret = kRecordAsMuchAsPossible;
      break;
    default:
      NOTREACHED();
  }
  if (enable_sampling)
    ret = ret + "," + kEnableSampling;
  if (enable_systrace)
    ret = ret + "," + kEnableSystrace;
  return ret;
}
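// Example: "record-continuously,enable-sampling" parses via SetFromString()
// into record_mode == RECORD_CONTINUOUSLY with enable_sampling == true, and
// ToString() on that TraceOptions produces the same string again.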
////////////////////////////////////////////////////////////////////////////////
//
//    TraceLog
//
////////////////////////////////////////////////////////////////////////////////

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TimeTicks& event_timestamp,
                      const TimeTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int event_count_;
  TimeDelta overhead_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};
TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      event_count_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}
TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);

  // Zero event_count_ happens in either of the following cases:
  // - no event generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);
  }

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}
void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TimeTicks& event_timestamp,
    const TimeTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  CheckThisIsCurrentBuffer();

  event_count_++;
  TimeTicks thread_now = ThreadNow();
  TimeTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
    if (trace_event) {
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()),
          event_timestamp, event_thread_timestamp,
          TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
      trace_event->UpdateDuration(now, thread_now);
    }
  }
  overhead_ += overhead;
}
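// Note on the two outputs above: every call accumulates into overhead_, which
// the destructor turns into the "average_overhead" metadata event, while an
// individual "overhead" COMPLETE event is only emitted when a single event's
// logging cost reaches kOverheadReportThresholdInMicroseconds.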
void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}
TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {
}

TraceLogStatus::~TraceLogStatus() {
}
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}
TraceLog::TraceLog()
    : mode_(DISABLED),
      num_traces_recorded_(0),
      event_callback_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      watch_category_(0),
      trace_options_(kInternalRecordUntilFull),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
      event_callback_category_filter_(
          CategoryFilter::kDefaultCategoryFilterString),
      thread_shared_chunk_index_(0),
      generation_(0) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(CategoryFilter(filter),
               RECORDING_MODE,
               TraceOptions(ECHO_TO_CONSOLE));
  }
#endif

  logged_events_.reset(CreateTraceBuffer());
}
TraceLog::~TraceLog() {
}
const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}
const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}
void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
  g_category_group_enabled[category_index] = enabled_flag;
}
void TraceLog::UpdateCategoryGroupEnabledFlags() {
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = 0; i < category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}
void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
  ResetTraceEventSyntheticDelays();
  const CategoryFilter::StringList& delays =
      category_filter_.GetSyntheticDelayValues();
  CategoryFilter::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(TimeDelta::FromMicroseconds(
            static_cast<int64>(target_duration * 1e6)));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}
const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  // The g_category_groups is append only, avoid using a lock for the fast path.
  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);

  // Search for pre-existing category group.
  for (size_t i = 0; i < current_category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  unsigned char* category_group_enabled = NULL;
  // This is the slow path: the lock is not held in the case above, so more
  // than one thread could have reached here trying to add the same category.
  // Only hold the lock when actually appending a new category, and
  // check the category groups again.
  AutoLock lock(lock_);
  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
  for (size_t i = 0; i < category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  // Create a new category group.
  DCHECK(category_index < MAX_CATEGORY_GROUPS) <<
      "must increase MAX_CATEGORY_GROUPS";
  if (category_index < MAX_CATEGORY_GROUPS) {
    // Don't hold on to the category_group pointer, so that we can create
    // category groups with strings not known at compile time (this is
    // required by SetWatchEvent).
    const char* new_group = strdup(category_group);
    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
    g_category_groups[category_index] = new_group;
    DCHECK(!g_category_group_enabled[category_index]);
    // Note that if both included and excluded patterns in the
    // CategoryFilter are empty, we exclude nothing,
    // thereby enabling this category group.
    UpdateCategoryGroupEnabledFlag(category_index);
    category_group_enabled = &g_category_group_enabled[category_index];
    // Update the max index now.
    base::subtle::Release_Store(&g_category_index, category_index + 1);
  } else {
    category_group_enabled =
        &g_category_group_enabled[g_category_categories_exhausted];
  }
  return category_group_enabled;
}
void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  category_groups->push_back(
      g_category_groups[g_category_trace_event_overhead]);
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = g_num_builtin_categories; i < category_index; i++)
    category_groups->push_back(g_category_groups[i]);
}
void TraceLog::SetEnabled(const CategoryFilter& category_filter,
                          Mode mode,
                          const TraceOptions& options) {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_message_loop_proxy_.get());

    InternalTraceOptions new_options =
        GetInternalOptionsFromTraceOptions(options);

    InternalTraceOptions old_options = trace_options();

    if (IsEnabled()) {
      if (new_options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";
      }

      if (mode != mode_) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
      }

      category_filter_.Merge(category_filter);
      UpdateCategoryGroupEnabledFlags();
      return;
    }

    if (dispatching_to_observer_list_) {
      DLOG(ERROR) <<
          "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    mode_ = mode;

    if (new_options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, new_options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    category_filter_ = CategoryFilter(category_filter);
    UpdateCategoryGroupEnabledFlags();
    UpdateSyntheticDelaysFromCategoryFilter();

    if (new_options & kInternalEnableSampling) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[0],
          "bucket0",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[1],
          "bucket1",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2],
          "bucket2",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(
              0, sampling_thread_.get(), &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}
TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceOptions(
    const TraceOptions& options) {
  InternalTraceOptions ret =
      options.enable_sampling ? kInternalEnableSampling : kInternalNone;
  switch (options.record_mode) {
    case RECORD_UNTIL_FULL:
      return ret | kInternalRecordUntilFull;
    case RECORD_CONTINUOUSLY:
      return ret | kInternalRecordContinuously;
    case ECHO_TO_CONSOLE:
      return ret | kInternalEchoToConsole;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      return ret | kInternalRecordAsMuchAsPossible;
  }
  NOTREACHED();
  return kInternalNone;
}
TraceLog::GetCurrentCategoryFilter() {
1497 AutoLock
lock(lock_
);
1498 return category_filter_
;
TraceOptions TraceLog::GetCurrentTraceOptions() const {
  TraceOptions ret;
  InternalTraceOptions option = trace_options();
  ret.enable_sampling = (option & kInternalEnableSampling) != 0;
  if (option & kInternalRecordUntilFull)
    ret.record_mode = RECORD_UNTIL_FULL;
  else if (option & kInternalRecordContinuously)
    ret.record_mode = RECORD_CONTINUOUSLY;
  else if (option & kInternalEchoToConsole)
    ret.record_mode = ECHO_TO_CONSOLE;
  else if (option & kInternalRecordAsMuchAsPossible)
    ret.record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
  else
    NOTREACHED();
  return ret;
}
void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();
}
void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (!IsEnabled())
    return;

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  mode_ = DISABLED;

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
  }

  category_filter_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;

  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (size_t i = 0; i < observer_list.size(); ++i)
      observer_list[i]->OnTraceLogDisabled();
  }
  dispatching_to_observer_list_ = false;
}
int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  if (!IsEnabled())
    return -1;
  return num_traces_recorded_;
}
void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}
void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}
bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  return it != enabled_state_observer_list_.end();
}
TraceLogStatus TraceLog::GetStatus() const {
  AutoLock lock(lock_);
  TraceLogStatus result;
  result.event_capacity = logged_events_->Capacity();
  result.event_count = logged_events_->Size();
  return result;
}
bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}
TraceBuffer* TraceLog::CreateTraceBuffer() {
  InternalTraceOptions options = trace_options();
  if (options & kInternalRecordContinuously)
    return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
    return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
  else if (options & kInternalEchoToConsole)
    return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
  else if (options & kInternalRecordAsMuchAsPossible)
    return CreateTraceBufferVectorOfSize(kTraceEventVectorBigBufferChunks);
  return CreateTraceBufferVectorOfSize(kTraceEventVectorBufferChunks);
}
TraceBuffer* TraceLog::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}
TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle, bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ = logged_events_->GetChunk(
        &thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}
void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked();
  }
}
void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
                                       EventCallback cb) {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_category_filter_ = category_filter;
  UpdateCategoryGroupEnabledFlags();
}
void TraceLog::SetEventCallbackDisabled() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_, 0);
  UpdateCategoryGroupEnabledFlags();
}
// Flush() works as follows:
// 1. Flush() is called in threadA whose message loop is saved in
//    flush_message_loop_proxy_;
// 2. If thread_message_loops_ is not empty, threadA posts task to each message
//    loop to flush the thread local buffers; otherwise finish the flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main
//      buffer;
//    - The message loop will be removed from thread_message_loops_;
//      If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
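// The |generation| captured below is what ties these asynchronous steps
// together: FlushCurrentThread(), OnFlushTimeout() and FinishFlush() all bail
// out via CheckGeneration() if a newer tracing session has started in the
// meantime.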
void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int generation = this->generation();
  // Copy of thread_message_loops_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner> >
      thread_message_loop_task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_message_loop_proxy_.get());
    flush_message_loop_proxy_ = MessageLoopProxy::current();
    DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        thread_message_loop_task_runners.push_back((*it)->task_runner());
      }
    }
  }

  if (thread_message_loop_task_runners.size()) {
    for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
      thread_message_loop_task_runners[i]->PostTask(
          FROM_HERE,
          Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
    }
    flush_message_loop_proxy_->PostDelayedTask(
        FROM_HERE,
        Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(generation);
}
void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const TraceLog::OutputCallback& flush_output_callback) {
  if (flush_output_callback.is_null())
    return;

  // The callback needs to be called at least once even if there are no events
  // to let the caller know the completion of the flush.
  bool has_more_events = true;
  do {
    scoped_refptr<RefCountedString> json_events_str_ptr =
        new RefCountedString();

    for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
      const TraceBufferChunk* chunk = logged_events->NextChunk();
      if (!chunk) {
        has_more_events = false;
        break;
      }
      for (size_t j = 0; j < chunk->size(); ++j) {
        if (i > 0 || j > 0)
          json_events_str_ptr->data().append(",\n");
        chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
      }
    }
    flush_output_callback.Run(json_events_str_ptr, has_more_events);
  } while (has_more_events);
}
void TraceLog::FinishFlush(int generation) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_message_loop_proxy_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}
// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get() ||
      thread_message_loops_.size())
    return;

  flush_message_loop_proxy_->PostTask(
      FROM_HERE,
      Bind(&TraceLog::FinishFlush, Unretained(this), generation));
}
void TraceLog::OnFlushTimeout(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING) <<
        "The following threads haven't finished flush in time. "
        "If this happens stably for some thread, please call "
        "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
        "the thread to avoid its trace events from being lost.";
    for (hash_set<MessageLoop*>::const_iterator it =
             thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  FinishFlush(generation);
}
void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  {
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}
void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                               name, id, thread_id, now,
                                               num_args, arg_names,
                                               arg_types, arg_values,
                                               convertable_values, flags);
}
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
  // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(!timestamp.is_null());

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
  TimeTicks now = OffsetNow();
  TimeTicks thread_now = ThreadNow();

  ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or one whose message loop may be
  // blocked, trace events are added directly into the main buffer.
  if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
    thread_local_event_buffer = thread_local_event_buffer_.Get();
    if (thread_local_event_buffer &&
        !CheckGeneration(thread_local_event_buffer->generation())) {
      delete thread_local_event_buffer;
      thread_local_event_buffer = NULL;
    }
    if (!thread_local_event_buffer) {
      thread_local_event_buffer = new ThreadLocalEventBuffer(this);
      thread_local_event_buffer_.Set(thread_local_event_buffer);
    }
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name = ThreadIdNameManager::GetInstance()->
        GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
                              phase, category_group_enabled, name, id,
                              num_args, arg_names, arg_types, arg_values,
                              convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
      &watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();
    }
  }

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(offset_event_timestamp,
                     phase == TRACE_EVENT_PHASE_COMPLETE ?
                         TRACE_EVENT_PHASE_BEGIN : phase,
                     category_group_enabled, name, id,
                     num_args, arg_names, arg_types, arg_values,
                     flags);
    }
  }

  // Use |now| instead of |offset_event_timestamp| to compute overhead, because
  // the event timestamp may not be the real time at which we started to add
  // the event (e.g. an event with a zero timestamp, or one generated some time
  // ago).
  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);

  return handle;
}

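// EventToConsoleMessage pretty-prints an event for ECHO_TO_CONSOLE output:
// each thread name is assigned one of six ANSI colors, and the
// thread_event_start_times_ stack is used both to indent nested events and to
// compute the duration printed when an END phase arrives.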
// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id = trace_event ?
      trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm",
                            thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}

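// The ETW overloads forward the event to the Windows ETW provider and also
// record a regular "ETW Trace Event" so the data shows up in normal traces.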
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
  // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  TimeTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
                                              now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
                     TRACE_EVENT_FLAG_NONE);
    }
  }
}

void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}

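// Metadata events describe the trace itself rather than timed work: process
// and thread names, sort indices, process labels, and (if it happened) the
// time at which the trace buffer overflowed.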
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
  InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                          0,
                          "num_cpus", "number",
                          base::SysInfo::NumberOfProcessors());
#endif

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_sort_index", "sort_index",
                            process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); ++it) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_labels", "labels",
                            JoinString(labels, ','));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); ++it) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_sort_index", "sort_index",
                            it->second);
  }

  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); ++it) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_name", "name",
                            it->second);
  }

  // If the buffer is full, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "trace_buffer_overflowed",
                            "overflowed_at_ts",
                            buffer_limit_reached_timestamp_);
  }
}

void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer under the control of the thread-local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq() ?
        thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}

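// SetProcessID also derives process_id_hash_, which is XORed into the ids of
// events carrying TRACE_EVENT_FLAG_MANGLE_ID (see
// AddTraceEventWithThreadIdAndTimestamp) so that ids stay distinct across
// processes.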
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}

void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it = process_labels_.find(
      label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}

bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
    const std::string& str) {
  return str.empty() ||
         str.at(0) == ' ' ||
         str.at(str.length() - 1) == ' ';
}

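// A filter string is a comma-separated list, e.g. (illustrative values):
//   "cat1,cat2,-excluded,disabled-by-default-foo,DELAY(gpu.SwapBuffers;16)"
// Plain tokens are included, "-"-prefixed tokens are excluded,
// "disabled-by-default-" tokens are tracked separately, and DELAY(...) tokens
// configure synthetic delays; see Initialize() below.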
CategoryFilter::CategoryFilter(const std::string& filter_string) {
  if (!filter_string.empty())
    Initialize(filter_string);
  else
    Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter() {
  Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_),
      delays_(cf.delays_) {
}

CategoryFilter::~CategoryFilter() {
}

CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  delays_ = rhs.delays_;
  return *this;
}

void CategoryFilter::Initialize(const std::string& filter_string) {
  // Tokenize list of categories, delimited by ','.
  StringTokenizer tokens(filter_string, ",");
  // Add each token to the appropriate list (included_, excluded_).
  while (tokens.GetNext()) {
    std::string category = tokens.token();
    // Ignore empty categories.
    if (category.empty())
      continue;
    // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
    if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
        category.at(category.size() - 1) == ')') {
      category = category.substr(
          strlen(kSyntheticDelayCategoryFilterPrefix),
          category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
      size_t name_length = category.find(';');
      if (name_length != std::string::npos && name_length > 0 &&
          name_length != category.size() - 1) {
        delays_.push_back(category);
      }
    } else if (category.at(0) == '-') {
      // Excluded categories start with '-'.
      // Remove '-' from category string.
      category = category.substr(1);
      excluded_.push_back(category);
    } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                TRACE_DISABLED_BY_DEFAULT("")) == 0) {
      disabled_.push_back(category);
    } else {
      included_.push_back(category);
    }
  }
}

void CategoryFilter::WriteString(const StringList& values,
                                 std::string* out,
                                 bool included) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = values.begin();
       ci != values.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
    ++token_cnt;
  }
}

void CategoryFilter::WriteString(const StringList& delays,
                                 std::string* out) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = delays.begin();
       ci != delays.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
                  ci->c_str());
    ++token_cnt;
  }
}

std::string CategoryFilter::ToString() const {
  std::string filter_string;
  WriteString(included_, &filter_string, true);
  WriteString(disabled_, &filter_string, true);
  WriteString(excluded_, &filter_string, false);
  WriteString(delays_, &filter_string);
  return filter_string;
}

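// Summary of the group-enabling logic below: any token matching an include
// pattern (or an explicitly listed disabled-by-default category) enables the
// group; any token matching an exclude pattern disables it; otherwise the
// group is enabled by default only when there are no include patterns and the
// group contains something other than disabled-by-default categories.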
bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as part of enabling/disabling
  // categories.

  bool had_enabled_by_default = false;
  DCHECK(category_group_name);
  CStringTokenizer category_group_tokens(
      category_group_name, category_group_name + strlen(category_group_name),
      ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
        category_group_token))
        << "Disallowed category string";
    if (IsCategoryEnabled(category_group_token.c_str())) {
      return true;
    }
    if (!MatchPattern(category_group_token.c_str(),
                      TRACE_DISABLED_BY_DEFAULT("*")))
      had_enabled_by_default = true;
  }
  // Do a second pass to check for explicitly disabled categories
  // (those explicitly enabled have priority due to first pass).
  category_group_tokens.Reset();
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    for (StringList::const_iterator ci = excluded_.begin();
         ci != excluded_.end(); ++ci) {
      if (MatchPattern(category_group_token.c_str(), ci->c_str()))
        return false;
    }
  }
  // If the category group is not excluded, and there are no included patterns,
  // we consider this category group enabled, as long as it had categories
  // other than disabled-by-default.
  return included_.empty() && had_enabled_by_default;
}

bool CategoryFilter::IsCategoryEnabled(const char* category_name) const {
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include the disabled.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (MatchPattern(category_name, ci->c_str()))
      return true;
  }

  if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (MatchPattern(category_name, ci->c_str()))
      return true;
  }

  return false;
}

bool CategoryFilter::HasIncludedPatterns() const {
  return !included_.empty();
}

void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
  // Keep included patterns only if both filters have an included entry.
  // Otherwise, one of the filters was specifying "*" and we want to honour
  // the broader filter.
  if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
    included_.insert(included_.end(),
                     nested_filter.included_.begin(),
                     nested_filter.included_.end());
  } else {
    included_.clear();
  }

  disabled_.insert(disabled_.end(),
                   nested_filter.disabled_.begin(),
                   nested_filter.disabled_.end());
  excluded_.insert(excluded_.end(),
                   nested_filter.excluded_.begin(),
                   nested_filter.excluded_.end());
  delays_.insert(delays_.end(),
                 nested_filter.delays_.begin(),
                 nested_filter.delays_.end());
}

void CategoryFilter::Clear() {
  included_.clear();
  disabled_.clear();
  excluded_.clear();
  delays_.clear();
}

const CategoryFilter::StringList&
    CategoryFilter::GetSyntheticDelayValues() const {
  return delays_;
}

}  // namespace trace_event
}  // namespace base

namespace trace_event_internal {

ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group, const char* name) {
  // The single atom works because for now the category_group can only be "gpu".
  DCHECK(strcmp(category_group, "gpu") == 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kNoEventId,
            static_cast<int>(base::PlatformThread::CurrentId()),
            base::TimeTicks::NowFromSystemTraceTime(),
            0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}

}  // namespace trace_event_internal