// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_event_impl.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/float_util.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"

#if defined(OS_WIN)
#include "base/trace_event/trace_event_win.h"
#endif
class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::trace_event::TraceLog,
              LeakySingletonTraits<base::trace_event::TraceLog>>::OnExit(0);
  }
};
// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

namespace base {
namespace trace_event {

namespace {

// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;
// String options that can be used to initialize TraceOptions.
const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kTraceToConsole[] = "trace-to-console";
const char kEnableSampling[] = "enable-sampling";
const char kEnableSystrace[] = "enable-systrace";
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const int kThreadFlushTimeoutMs = 3000;
// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";

const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";

#define MAX_CATEGORY_GROUPS 100
// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
    "toplevel",
    "tracing already shutdown",
    "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
    "__metadata",
    // For reporting trace_event overhead. For thread local event buffers only.
    "trace_event_overhead"};

// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
// Skip default categories.
base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;

TimeTicks ThreadNow() {
  return TimeTicks::IsThreadNowSupported() ?
      TimeTicks::ThreadNow() : TimeTicks();
}
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }
  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }
  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }
  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }
  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
        queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.Pass();
  }

 private:
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    const TraceBufferChunk* NextChunk() override {
      return current_iteration_index_ < chunks_.size() ?
          chunks_[current_iteration_index_++] : NULL;
    }

    scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBufferChunk>();
    }
    void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
      NOTIMPLEMENTED();
    }
    bool IsFull() const override { return false; }
    size_t Size() const override { return 0; }
    size_t Capacity() const override { return 0; }
    TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
      return NULL;
    }
    scoped_ptr<TraceBuffer> CloneForIteration() const override {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBuffer>();
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };
  bool QueueIsEmpty() const {
    return queue_head_ == queue_tail_;
  }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
        queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const {
    return QueueSize() == queue_capacity() - 1;
  }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }
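  // With the one extra slot, head == tail means the queue is empty and a full
  // queue holds queue_capacity() - 1 indices, which is exactly what
  // QueueIsEmpty() and QueueIsFull() above check.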
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }
  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }
  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }

  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    NOTIMPLEMENTED();
    return scoped_ptr<TraceBuffer>();
  }
 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};
template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name, const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(thread_id,
                          TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
                          &g_category_group_enabled[g_category_metadata],
                          metadata_name, ::trace_event_internal::kNoEventId,
                          num_args, &arg_name, &arg_type, &arg_value, NULL,
                          TRACE_EVENT_FLAG_NONE);
}
class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() {
    thread_local_boolean_->Set(false);
  }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};

}  // namespace
void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}
scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}
// A helper class that allows the lock to be acquired in the middle of the scope
// and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}

  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    }
  }

 private:
  Lock* lock_;
  bool locked_;

  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};
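// A TraceEventHandle packs (chunk_seq, chunk_index, event_index) so that an
// event already added to a TraceBuffer can be found again later via
// GetEventByHandle().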
// Use this function instead of TraceEventHandle constructor to keep the
// overhead of ScopedTracer (trace_event.h) constructor to a minimum.
void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_seq);
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

namespace {

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }

// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}

}  // namespace
TraceEvent::TraceEvent()
    : duration_(TimeDelta::FromInternalValue(-1)),
      id_(0u),
      category_group_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    arg_names_[i] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));
}

TraceEvent::~TraceEvent() {
}
void TraceEvent::CopyFrom(const TraceEvent& other) {
  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  duration_ = other.duration_;
  id_ = other.id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];
    arg_values_[i] = other.arg_values_[i];
    convertable_values_[i] = other.convertable_values_[i];
  }
}
void TraceEvent::Initialize(
    int thread_id,
    TimeTicks timestamp,
    TimeTicks thread_timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  timestamp_ = timestamp;
  thread_timestamp_ = thread_timestamp;
  duration_ = TimeDelta::FromInternalValue(-1);
  id_ = id;
  category_group_enabled_ = category_group_enabled;
  name_ = name;
  thread_id_ = thread_id;
  phase_ = phase;
  flags_ = flags;

  // Clamp num_args since it may have been set by a third-party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i] = convertable_values[i];
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i] = NULL;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }
  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;

    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }

    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (!arg_is_copy[i])
        continue;
      CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }

    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}
void TraceEvent::Reset() {
  // Only reset fields that won't be initialized in Initialize(), or that may
  // hold references to other objects.
  duration_ = TimeDelta::FromInternalValue(-1);
  parameter_copy_storage_ = NULL;
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    convertable_values_[i] = NULL;
}

void TraceEvent::UpdateDuration(const TimeTicks& now,
                                const TimeTicks& thread_now) {
  DCHECK_EQ(duration_.ToInternalValue(), -1);
  duration_ = now - timestamp_;
  thread_duration_ = thread_now - thread_timestamp_;
}
void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
                                   std::string* out) {
  switch (type) {
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
      break;
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
      break;
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
      break;
    case TRACE_VALUE_TYPE_DOUBLE: {
      // FIXME: base/json/json_writer.cc is using the same code,
      //        should be made into a common method.
      std::string real;
      double val = value.as_double;
      if (IsFinite(val)) {
        real = DoubleToString(val);
        // Ensure that the number has a .0 if there's no decimal or 'e'. This
        // makes sure that when we read the JSON back, it's interpreted as a
        // real rather than an int.
        if (real.find('.') == std::string::npos &&
            real.find('e') == std::string::npos &&
            real.find('E') == std::string::npos) {
          real.append(".0");
        }
        // The JSON spec requires that non-integer values in the range (-1,1)
        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
        if (real[0] == '.') {
          real.insert(0, "0");
        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
          // "-.1" bad "-0.1" good
          real.insert(1, "0");
        }
      } else if (IsNaN(val)) {
        // The JSON spec doesn't allow NaN and Infinity (since these are
        // objects in EcmaScript). Use strings instead.
        real = "\"NaN\"";
      } else if (val < 0) {
        real = "\"-Infinity\"";
      } else {
        real = "\"Infinity\"";
      }
      StringAppendF(out, "%s", real.c_str());
      break;
    }
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                                     reinterpret_cast<intptr_t>(
                                     value.as_pointer)));
      break;
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
      break;
    default:
      NOTREACHED() << "Don't know how to print this value";
      break;
  }
}
void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":{",
      process_id,
      thread_id_,
      time_int64,
      phase_,
      TraceLog::GetCategoryGroupName(category_group_enabled_),
      name_);

  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
    else
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";

  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    int64 duration = duration_.ToInternalValue();
    if (duration != -1)
      StringAppendF(out, ",\"dur\":%" PRId64, duration);
    if (!thread_timestamp_.is_null()) {
      int64 thread_duration = thread_duration_.ToInternalValue();
      if (thread_duration != -1)
        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
    }
  }

  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
  }

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;
      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;
      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}
void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  *out << "]";
  if (arg_names_[0]) {
    *out << ", {";
    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out << ", ";
      *out << arg_names_[i] << ":";
      std::string value_as_text;

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

      *out << value_as_text;
    }
    *out << "}";
  }
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////

TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}
void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////
class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;

class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};
// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  ~TraceSamplingThread() override;

  // Implementation of PlatformThread::Delegate:
  void ThreadMain() override;

  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  void Stop();
  void WaitSamplingEventForTesting();

 private:
  friend class TraceLog;

  void GetSamples();
  // Not thread-safe. Once the ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);
  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  CancellationFlag cancellation_flag_;
  WaitableEvent waitable_event_for_testing_;
};
TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(false, false) {
}

TraceSamplingThread::~TraceSamplingThread() {
}

void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_.IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    GetSamples();
    waitable_event_for_testing_.Signal();
  }
}
void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}
void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}

void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  // Access to sample_buckets_ doesn't cause races with the sampling thread
  // that uses the sample_buckets_, because it is guaranteed that
  // RegisterSampleBucket is called before the sampling thread is created.
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}
void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
                                                 const char** name) {
  *category = combined;
  *name = &combined[strlen(combined) + 1];
}

void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();
}

void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();
}
TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceOptions
//
////////////////////////////////////////////////////////////////////////////////
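// TraceOptions can be configured from a comma-separated option string such as
// "record-continuously,enable-sampling" (see the string constants near the top
// of this file); SetFromString() below parses that format.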
bool TraceOptions::SetFromString(const std::string& options_string) {
  record_mode = RECORD_UNTIL_FULL;
  enable_sampling = false;
  enable_systrace = false;

  std::vector<std::string> split;
  std::vector<std::string>::iterator iter;
  base::SplitString(options_string, ',', &split);
  for (iter = split.begin(); iter != split.end(); ++iter) {
    if (*iter == kRecordUntilFull) {
      record_mode = RECORD_UNTIL_FULL;
    } else if (*iter == kRecordContinuously) {
      record_mode = RECORD_CONTINUOUSLY;
    } else if (*iter == kTraceToConsole) {
      record_mode = ECHO_TO_CONSOLE;
    } else if (*iter == kRecordAsMuchAsPossible) {
      record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
    } else if (*iter == kEnableSampling) {
      enable_sampling = true;
    } else if (*iter == kEnableSystrace) {
      enable_systrace = true;
    } else {
      return false;
    }
  }
  return true;
}
std::string TraceOptions::ToString() const {
  std::string ret;
  switch (record_mode) {
    case RECORD_UNTIL_FULL:
      ret = kRecordUntilFull;
      break;
    case RECORD_CONTINUOUSLY:
      ret = kRecordContinuously;
      break;
    case ECHO_TO_CONSOLE:
      ret = kTraceToConsole;
      break;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      ret = kRecordAsMuchAsPossible;
      break;
    default:
      NOTREACHED();
  }
  if (enable_sampling)
    ret = ret + "," + kEnableSampling;
  if (enable_systrace)
    ret = ret + "," + kEnableSystrace;
  return ret;
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceLog
//
////////////////////////////////////////////////////////////////////////////////

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TimeTicks& event_timestamp,
                      const TimeTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int event_count_;
  TimeDelta overhead_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};
TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      event_count_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);

  // Zero event_count_ happens in either of the following cases:
  // - no event generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);
  }

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}
void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TimeTicks& event_timestamp,
    const TimeTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  CheckThisIsCurrentBuffer();

  event_count_++;
  TimeTicks thread_now = ThreadNow();
  TimeTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
    if (trace_event) {
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()),
          event_timestamp, event_thread_timestamp,
          TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
      trace_event->UpdateDuration(now, thread_now);
    }
  }
  overhead_ += overhead;
}

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}
void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}

TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {
}

TraceLogStatus::~TraceLogStatus() {
}
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}
TraceLog::TraceLog()
    : mode_(DISABLED),
      num_traces_recorded_(0),
      event_callback_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      watch_category_(0),
      trace_options_(kInternalRecordUntilFull),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
      event_callback_category_filter_(
          CategoryFilter::kDefaultCategoryFilterString),
      thread_shared_chunk_index_(0),
      generation_(0),
      use_worker_thread_(false) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(CategoryFilter(filter),
               RECORDING_MODE,
               TraceOptions(ECHO_TO_CONSOLE));
  }
#endif

  logged_events_.reset(CreateTraceBuffer());
}

TraceLog::~TraceLog() {
}
const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}
const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}
void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
  g_category_group_enabled[category_index] = enabled_flag;
}

void TraceLog::UpdateCategoryGroupEnabledFlags() {
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = 0; i < category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}
void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
  ResetTraceEventSyntheticDelays();
  const CategoryFilter::StringList& delays =
      category_filter_.GetSyntheticDelayValues();
  CategoryFilter::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(TimeDelta::FromMicroseconds(
            static_cast<int64>(target_duration * 1e6)));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}
const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  // The g_category_groups is append only, avoid using a lock for the fast path.
  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);

  // Search for pre-existing category group.
  for (size_t i = 0; i < current_category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  unsigned char* category_group_enabled = NULL;
  // This is the slow path: the lock is not held in the case above, so more
  // than one thread could have reached here trying to add the same category.
  // Only hold the lock when actually appending a new category, and
  // check the category groups again.
  AutoLock lock(lock_);
  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
  for (size_t i = 0; i < category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  // Create a new category group.
  DCHECK(category_index < MAX_CATEGORY_GROUPS) <<
      "must increase MAX_CATEGORY_GROUPS";
  if (category_index < MAX_CATEGORY_GROUPS) {
    // Don't hold on to the category_group pointer, so that we can create
    // category groups with strings not known at compile time (this is
    // required by SetWatchEvent).
    const char* new_group = strdup(category_group);
    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
    g_category_groups[category_index] = new_group;
    DCHECK(!g_category_group_enabled[category_index]);
    // Note that if both included and excluded patterns in the
    // CategoryFilter are empty, we exclude nothing,
    // thereby enabling this category group.
    UpdateCategoryGroupEnabledFlag(category_index);
    category_group_enabled = &g_category_group_enabled[category_index];
    // Update the max index now.
    base::subtle::Release_Store(&g_category_index, category_index + 1);
  } else {
    category_group_enabled =
        &g_category_group_enabled[g_category_categories_exhausted];
  }
  return category_group_enabled;
}
void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  category_groups->push_back(
      g_category_groups[g_category_trace_event_overhead]);
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = g_num_builtin_categories; i < category_index; i++)
    category_groups->push_back(g_category_groups[i]);
}
void TraceLog::SetEnabled(const CategoryFilter& category_filter,
                          Mode mode,
                          const TraceOptions& options) {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_message_loop_proxy_.get());

    InternalTraceOptions new_options =
        GetInternalOptionsFromTraceOptions(options);

    InternalTraceOptions old_options = trace_options();

    if (IsEnabled()) {
      if (new_options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";
      }

      if (mode != mode_) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
      }

      category_filter_.Merge(category_filter);
      UpdateCategoryGroupEnabledFlags();
      return;
    }

    if (dispatching_to_observer_list_) {
      DLOG(ERROR) <<
          "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    mode_ = mode;

    if (new_options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, new_options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    category_filter_ = CategoryFilter(category_filter);
    UpdateCategoryGroupEnabledFlags();
    UpdateSyntheticDelaysFromCategoryFilter();

    if (new_options & kInternalEnableSampling) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[0],
          "bucket0",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[1],
          "bucket1",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2],
          "bucket2",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(
              0, sampling_thread_.get(), &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}
TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceOptions(
    const TraceOptions& options) {
  InternalTraceOptions ret =
      options.enable_sampling ? kInternalEnableSampling : kInternalNone;
  switch (options.record_mode) {
    case RECORD_UNTIL_FULL:
      return ret | kInternalRecordUntilFull;
    case RECORD_CONTINUOUSLY:
      return ret | kInternalRecordContinuously;
    case ECHO_TO_CONSOLE:
      return ret | kInternalEchoToConsole;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      return ret | kInternalRecordAsMuchAsPossible;
  }
  NOTREACHED();
  return kInternalNone;
}
CategoryFilter TraceLog::GetCurrentCategoryFilter() {
  AutoLock lock(lock_);
  return category_filter_;
}

TraceOptions TraceLog::GetCurrentTraceOptions() const {
  TraceOptions ret;
  InternalTraceOptions option = trace_options();
  ret.enable_sampling = (option & kInternalEnableSampling) != 0;
  if (option & kInternalRecordUntilFull)
    ret.record_mode = RECORD_UNTIL_FULL;
  else if (option & kInternalRecordContinuously)
    ret.record_mode = RECORD_CONTINUOUSLY;
  else if (option & kInternalEchoToConsole)
    ret.record_mode = ECHO_TO_CONSOLE;
  else if (option & kInternalRecordAsMuchAsPossible)
    ret.record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
  else
    NOTREACHED();
  return ret;
}
void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();
}

void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (!IsEnabled())
    return;

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  mode_ = DISABLED;

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
  }

  category_filter_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;

  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (size_t i = 0; i < observer_list.size(); ++i)
      observer_list[i]->OnTraceLogDisabled();
  }
  dispatching_to_observer_list_ = false;
}
int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  if (!IsEnabled())
    return -1;
  return num_traces_recorded_;
}

void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  return it != enabled_state_observer_list_.end();
}
TraceLogStatus TraceLog::GetStatus() const {
  AutoLock lock(lock_);
  TraceLogStatus result;
  result.event_capacity = logged_events_->Capacity();
  result.event_count = logged_events_->Size();
  return result;
}

bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}
TraceBuffer* TraceLog::CreateTraceBuffer() {
  InternalTraceOptions options = trace_options();
  if (options & kInternalRecordContinuously)
    return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
    return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
  else if (options & kInternalEchoToConsole)
    return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
  else if (options & kInternalRecordAsMuchAsPossible)
    return CreateTraceBufferVectorOfSize(kTraceEventVectorBigBufferChunks);
  return CreateTraceBufferVectorOfSize(kTraceEventVectorBufferChunks);
}

TraceBuffer* TraceLog::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}
TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle, bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ = logged_events_->GetChunk(
        &thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}

void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked();
  }
}
void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
                                       EventCallback cb) {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_category_filter_ = category_filter;
  UpdateCategoryGroupEnabledFlags();
}

void TraceLog::SetEventCallbackDisabled() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_, 0);
  UpdateCategoryGroupEnabledFlags();
}
// Flush() works as the following:
// 1. Flush() is called in threadA whose message loop is saved in
//    flush_message_loop_proxy_;
// 2. If thread_message_loops_ is not empty, threadA posts task to each message
//    loop to flush the thread local buffers; otherwise finish the flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main buffer;
//    - The message loop will be removed from thread_message_loops_;
//      If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  use_worker_thread_ = use_worker_thread;
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int generation = this->generation();
  // Copy of thread_message_loops_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner> >
      thread_message_loop_task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_message_loop_proxy_.get());
    flush_message_loop_proxy_ = MessageLoopProxy::current();
    DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        thread_message_loop_task_runners.push_back((*it)->task_runner());
      }
    }
  }

  if (thread_message_loop_task_runners.size()) {
    for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
      thread_message_loop_task_runners[i]->PostTask(
          FROM_HERE,
          Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
    }
    flush_message_loop_proxy_->PostDelayedTask(
        FROM_HERE,
        Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(generation);
}
// Usually it runs on a different thread.
void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const TraceLog::OutputCallback& flush_output_callback) {
  if (flush_output_callback.is_null())
    return;

  // The callback needs to be called at least once even if there are no events
  // to let the caller know the completion of flush.
  bool has_more_events = true;
  do {
    scoped_refptr<RefCountedString> json_events_str_ptr =
        new RefCountedString();

    while (json_events_str_ptr->size() < kTraceEventBufferSizeInBytes) {
      const TraceBufferChunk* chunk = logged_events->NextChunk();
      has_more_events = chunk != NULL;
      if (!chunk)
        break;
      for (size_t j = 0; j < chunk->size(); ++j) {
        if (json_events_str_ptr->size())
          json_events_str_ptr->data().append(",\n");
        chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
      }
    }
    flush_output_callback.Run(json_events_str_ptr, has_more_events);
  } while (has_more_events);
}
void TraceLog::FinishFlush(int generation) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_message_loop_proxy_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();
  }

  if (use_worker_thread_ &&
      WorkerPool::PostTask(
          FROM_HERE,
          Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
               Passed(&previous_logged_events),
               flush_output_callback),
          true)) {
    return;
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}
// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get() ||
      thread_message_loops_.size())
    return;

  flush_message_loop_proxy_->PostTask(
      FROM_HERE,
      Bind(&TraceLog::FinishFlush, Unretained(this), generation));
}
void TraceLog::OnFlushTimeout(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING) <<
        "The following threads haven't finished flush in time. "
        "If this happens stably for some thread, please call "
        "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
        "the thread to avoid its trace events from being lost.";
    for (hash_set<MessageLoop*>::const_iterator it =
             thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  FinishFlush(generation);
}
void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  {
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();
  }  // release lock

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}
void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}

TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                               name, id, thread_id, now,
                                               num_args, arg_names,
                                               arg_types, arg_values,
                                               convertable_values, flags);
}

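// AddTraceEventWithThreadIdAndTimestamp() below is the common sink for the
// TRACE_EVENT* macros (reached through the TRACE_EVENT_API_ADD_TRACE_EVENT*
// hooks). The returned TraceEventHandle matters mainly for
// TRACE_EVENT_PHASE_COMPLETE events: the scoped macros keep the handle and
// later pass it to UpdateTraceEventDuration() to fill in the duration.
// Simplified expansion sketch (illustrative only, not the literal macro
// output):
//
//   // TRACE_EVENT0("cat", "name") roughly becomes:
//   TraceEventHandle h = TRACE_EVENT_API_ADD_TRACE_EVENT(
//       TRACE_EVENT_PHASE_COMPLETE, category_group_enabled, "name",
//       trace_event_internal::kNoEventId, 0, NULL, NULL, NULL, NULL,
//       TRACE_EVENT_FLAG_NONE);
//   ...  // the traced scope runs
//   TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled,
//                                               "name", h);
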
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(name);
  DCHECK(!timestamp.is_null());

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
  TimeTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ?
      OffsetNow() : offset_event_timestamp;
  TimeTicks thread_now = ThreadNow();

  ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or whose message loop may be blocked,
  // the trace events will be added into the main buffer directly.
  if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
    thread_local_event_buffer = thread_local_event_buffer_.Get();
    if (thread_local_event_buffer &&
        !CheckGeneration(thread_local_event_buffer->generation())) {
      delete thread_local_event_buffer;
      thread_local_event_buffer = NULL;
    }
    if (!thread_local_event_buffer) {
      thread_local_event_buffer = new ThreadLocalEventBuffer(this);
      thread_local_event_buffer_.Set(thread_local_event_buffer);
    }
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name = ThreadIdNameManager::GetInstance()->
        GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
                              phase, category_group_enabled, name, id,
                              num_args, arg_names, arg_types, arg_values,
                              convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
      &watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();
    }
  }

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(offset_event_timestamp,
                     phase == TRACE_EVENT_PHASE_COMPLETE ?
                         TRACE_EVENT_PHASE_BEGIN : phase,
                     category_group_enabled, name, id,
                     num_args, arg_names, arg_types, arg_values,
                     flags);
    }
  }

  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);

  return handle;
}

// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id = trace_event ?
      trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm",
                            thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}

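// EventToConsoleMessage() colors its output with ANSI escape sequences:
// "\x1b[0;3<N>m" (N in 1..6, chosen per thread name) sets the foreground color
// and "\x1b[0;m" resets it, with one "| " per nesting level before the
// pretty-printed event; END events also append the elapsed time in ms. The
// messages are therefore only readable on ANSI-capable terminals.
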
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  TimeTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
                                              now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
                     TRACE_EVENT_FLAG_NONE);
    }
  }
}

void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}

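// SetWatchEvent()/CancelWatchEvent() are used, e.g. by tests, to get a callback
// when a specific event is recorded. A minimal usage sketch (hypothetical
// callback and call site, not part of this file):
//
//   TraceLog::GetInstance()->SetWatchEvent(
//       "cat", "interesting_event",
//       base::Bind(&OnWatchEventFired));  // OnWatchEventFired: caller-defined.
//   ...                                   // run the code under test
//   TraceLog::GetInstance()->CancelWatchEvent();
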
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
  InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                          0,
                          "num_cpus", "number",
                          base::SysInfo::NumberOfProcessors());
#endif

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_sort_index", "sort_index",
                            process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); it++) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_labels", "labels",
                            JoinString(labels, ','));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); it++) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); it++) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_name", "name",
                            it->second);
  }

  // If the buffer is full, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "trace_buffer_overflowed",
                            "overflowed_at_ts",
                            buffer_limit_reached_timestamp_);
  }
}

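// The metadata events added above are what the trace viewer uses to label and
// sort processes and threads. For reference, a metadata record serializes
// roughly as (illustrative only; the exact fields come from
// TraceEvent::AppendAsJSON):
//   {"ph":"M", "name":"thread_name", "pid":..., "tid":..., "args":{"name":"..."}}
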
void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer under the control of the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq() ?
        thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}

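// Note on the handle layout used above: a TraceEventHandle locates an event by
// (chunk_seq, chunk_index, event_index). A zero chunk_seq (the value carried
// by a zero-initialized handle) marks an invalid handle, which is why
// GetEventByHandleInternal() bails out early on it.
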
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}

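// The hash above is a single FNV-style round (xor the 64-bit pid with the FNV
// offset basis, then multiply by the FNV prime). It is only used to mangle
// event ids when TRACE_EVENT_FLAG_MANGLE_ID is set (see
// AddTraceEventWithThreadIdAndTimestamp()), so that ids coming from different
// processes are unlikely to collide when traces are merged; it is not meant to
// be a cryptographic hash.
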
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it = process_labels_.find(
      label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}

bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
    const std::string& str) {
  return str.empty() ||
         str.at(0) == ' ' ||
         str.at(str.length() - 1) == ' ';
}

CategoryFilter::CategoryFilter(const std::string& filter_string) {
  if (!filter_string.empty())
    Initialize(filter_string);
  else
    Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter() {
  Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_),
      delays_(cf.delays_) {
}

CategoryFilter::~CategoryFilter() {
}

CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  delays_ = rhs.delays_;
  return *this;
}

void CategoryFilter::Initialize(const std::string& filter_string) {
  // Tokenize the list of categories, delimited by ','.
  StringTokenizer tokens(filter_string, ",");
  // Add each token to the appropriate list (included_, excluded_).
  while (tokens.GetNext()) {
    std::string category = tokens.token();
    // Ignore empty categories.
    if (category.empty())
      continue;
    // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
    if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
        category.at(category.size() - 1) == ')') {
      category = category.substr(
          strlen(kSyntheticDelayCategoryFilterPrefix),
          category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
      size_t name_length = category.find(';');
      if (name_length != std::string::npos && name_length > 0 &&
          name_length != category.size() - 1) {
        delays_.push_back(category);
      }
    } else if (category.at(0) == '-') {
      // Excluded categories start with '-'.
      // Remove '-' from the category string.
      category = category.substr(1);
      excluded_.push_back(category);
    } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                TRACE_DISABLED_BY_DEFAULT("")) == 0) {
      disabled_.push_back(category);
    } else {
      included_.push_back(category);
    }
  }
}

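// Filter strings accepted by Initialize() are comma-separated lists mixing the
// four token kinds handled above, e.g. (illustrative only):
//   "cat1,cat2,-cat3,disabled-by-default-cc.debug,DELAY(gpu.PresentingFrame;16)"
// Here "cat1" and "cat2" are included, "-cat3" is excluded, the
// "disabled-by-default-" prefix routes a category to the disabled list, and
// DELAY(...) declares a synthetic delay named "gpu.PresentingFrame" with a
// 16 ms target.
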
void CategoryFilter::WriteString(const StringList& values,
                                 std::string* out,
                                 bool included) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = values.begin();
       ci != values.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
    ++token_cnt;
  }
}

void CategoryFilter::WriteString(const StringList& delays,
                                 std::string* out) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = delays.begin();
       ci != delays.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
                  ci->c_str());
    ++token_cnt;
  }
}

std::string CategoryFilter::ToString() const {
  std::string filter_string;
  WriteString(included_, &filter_string, true);
  WriteString(disabled_, &filter_string, true);
  WriteString(excluded_, &filter_string, false);
  WriteString(delays_, &filter_string);
  return filter_string;
}

bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as part of enabling/disabling
  // categories.

  bool had_enabled_by_default = false;
  DCHECK(category_group_name);
  CStringTokenizer category_group_tokens(
      category_group_name, category_group_name + strlen(category_group_name),
      ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
               category_group_token))
        << "Disallowed category string";
    if (IsCategoryEnabled(category_group_token.c_str())) {
      return true;
    }
    if (!MatchPattern(category_group_token.c_str(),
                      TRACE_DISABLED_BY_DEFAULT("*")))
      had_enabled_by_default = true;
  }
  // Do a second pass to check for explicitly disabled categories
  // (those explicitly enabled have priority due to the first pass).
  category_group_tokens.Reset();
  bool category_group_disabled = false;
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    for (StringList::const_iterator ci = excluded_.begin();
         ci != excluded_.end(); ++ci) {
      if (MatchPattern(category_group_token.c_str(), ci->c_str())) {
        // The current token of category_group_name is present in the excluded_
        // list. Flag the exclusion and proceed further to check if any of the
        // remaining categories of category_group_name are not present in the
        // excluded_ list.
        category_group_disabled = true;
        break;
      }
      // This category of category_group_name is not present in the excluded_
      // list, so it has to be in the included_ list. Enable the
      // category_group_name for recording.
      category_group_disabled = false;
    }
    // One of the categories present in category_group_name is not present in
    // the excluded_ list, which implies this category group can be enabled for
    // recording, since one of its categories is enabled for recording.
    if (!category_group_disabled)
      break;
  }
  // If the category group is not excluded, and there are no included patterns,
  // we consider this category group enabled, as long as it had categories
  // other than disabled-by-default.
  return !category_group_disabled &&
         included_.empty() && had_enabled_by_default;
}

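// Illustrative examples for IsCategoryGroupEnabled(): with the filter "-cat1",
// the group "cat1,cat2" stays enabled because "cat2" is not excluded (and the
// filter has no included patterns); with "-cat1,-cat2" every category in the
// group is excluded, so the group is disabled.
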
bool CategoryFilter::IsCategoryEnabled(const char* category_name) const {
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include the disabled.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (MatchPattern(category_name, ci->c_str()))
      return true;
  }

  if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (MatchPattern(category_name, ci->c_str()))
      return true;
  }

  return false;
}

bool CategoryFilter::HasIncludedPatterns() const {
  return !included_.empty();
}

void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
  // Keep included patterns only if both filters have an included entry.
  // Otherwise, one of the filters was specifying "*" and we want to honour the
  // broadest filter.
  if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
    included_.insert(included_.end(),
                     nested_filter.included_.begin(),
                     nested_filter.included_.end());
  } else {
    included_.clear();
  }

  disabled_.insert(disabled_.end(),
                   nested_filter.disabled_.begin(),
                   nested_filter.disabled_.end());
  excluded_.insert(excluded_.end(),
                   nested_filter.excluded_.begin(),
                   nested_filter.excluded_.end());
  delays_.insert(delays_.end(),
                 nested_filter.delays_.begin(),
                 nested_filter.delays_.end());
}

void CategoryFilter::Clear() {
  included_.clear();
  disabled_.clear();
  excluded_.clear();
  delays_.clear();
}

const CategoryFilter::StringList&
    CategoryFilter::GetSyntheticDelayValues() const {
  return delays_;
}

}  // namespace trace_event
}  // namespace base

namespace trace_event_internal {

ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group, const char* name) {
  // The single atom works because for now the category_group can only be "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kNoEventId,
            static_cast<int>(base::PlatformThread::CurrentId()),
            base::TimeTicks::NowFromSystemTraceTime(),
            0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}

}  // namespace trace_event_internal