// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 #include "base/debug/trace_event_impl.h"
9 #include "base/base_switches.h"
10 #include "base/bind.h"
11 #include "base/command_line.h"
12 #include "base/debug/leak_annotations.h"
13 #include "base/debug/trace_event.h"
14 #include "base/debug/trace_event_synthetic_delay.h"
15 #include "base/float_util.h"
16 #include "base/format_macros.h"
17 #include "base/json/string_escape.h"
18 #include "base/lazy_instance.h"
19 #include "base/memory/singleton.h"
20 #include "base/message_loop/message_loop.h"
21 #include "base/process/process_metrics.h"
22 #include "base/stl_util.h"
23 #include "base/strings/string_number_conversions.h"
24 #include "base/strings/string_split.h"
25 #include "base/strings/string_tokenizer.h"
26 #include "base/strings/string_util.h"
27 #include "base/strings/stringprintf.h"
28 #include "base/strings/utf_string_conversions.h"
29 #include "base/synchronization/cancellation_flag.h"
30 #include "base/synchronization/waitable_event.h"
31 #include "base/sys_info.h"
32 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
33 #include "base/threading/platform_thread.h"
34 #include "base/threading/thread_id_name_manager.h"
35 #include "base/time/time.h"
38 #include "base/debug/trace_event_win.h"
class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::debug::TraceLog,
              LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
  }
};
// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const int kThreadFlushTimeoutMs = 3000;
// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";

const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
#define MAX_CATEGORY_GROUPS 100

// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
  "toplevel",
  "tracing already shutdown",
  "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
  "__metadata",
  // For reporting trace_event overhead. For thread local event buffers only.
  "trace_event_overhead"};
// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
int g_category_index = g_num_builtin_categories;  // Skip default categories.
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
TimeTicks ThreadNow() {
  return TimeTicks::IsThreadNowSupported() ?
      TimeTicks::ThreadNow() : TimeTicks();
}
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }
  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }
  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }
  virtual bool IsFull() const OVERRIDE {
    return false;
  }

  virtual size_t Size() const OVERRIDE {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  virtual size_t Capacity() const OVERRIDE {
    return max_chunks_ * kTraceBufferChunkSize;
  }
  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }
  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }
  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.PassAs<TraceBuffer>();
  }
 private:
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    virtual const TraceBufferChunk* NextChunk() OVERRIDE {
      return current_iteration_index_ < chunks_.size() ?
          chunks_[current_iteration_index_++] : NULL;
    }

    virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBufferChunk>();
    }
    virtual void ReturnChunk(size_t index,
                             scoped_ptr<TraceBufferChunk>) OVERRIDE {
      NOTIMPLEMENTED();
    }
    virtual bool IsFull() const OVERRIDE { return false; }
    virtual size_t Size() const OVERRIDE { return 0; }
    virtual size_t Capacity() const OVERRIDE { return 0; }
    virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
      return NULL;
    }
    virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBuffer>();
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };
  bool QueueIsEmpty() const {
    return queue_head_ == queue_tail_;
  }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
        queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const {
    return QueueSize() == queue_capacity() - 1;
  }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }
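  // Note on the recyclable-chunk queue: with capacity max_chunks_ + 1,
  // "head == tail" always means empty and "size == capacity - 1" means full,
  // so no separate element count is needed. E.g. with max_chunks_ = 2 the
  // queue has 3 slots and holds at most 2 chunk indices at any time.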
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector()
      : in_flight_chunk_count_(0),
        current_iteration_index_(0) {
    chunks_.reserve(kTraceEventVectorBufferChunks);
  }
  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We can not DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }
  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }
  virtual bool IsFull() const OVERRIDE {
    return chunks_.size() >= kTraceEventVectorBufferChunks;
  }

  virtual size_t Size() const OVERRIDE {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  virtual size_t Capacity() const OVERRIDE {
    return kTraceEventVectorBufferChunks * kTraceBufferChunkSize;
  }
  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }
  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }
  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    NOTIMPLEMENTED();
    return scoped_ptr<TraceBuffer>();
  }
 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};
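// Note: TraceBufferRingBuffer (used for RECORD_CONTINUOUSLY, monitoring and
// ECHO_TO_CONSOLE) recycles chunks through its queue, while TraceBufferVector
// (the RECORD_UNTIL_FULL default) only grows until it reaches
// kTraceEventVectorBufferChunks; see TraceLog::CreateTraceBuffer() below.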
364 template <typename T
>
365 void InitializeMetadataEvent(TraceEvent
* trace_event
,
367 const char* metadata_name
, const char* arg_name
,
373 unsigned char arg_type
;
374 unsigned long long arg_value
;
375 ::trace_event_internal::SetTraceValue(value
, &arg_type
, &arg_value
);
376 trace_event
->Initialize(thread_id
,
377 TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA
,
378 &g_category_group_enabled
[g_category_metadata
],
379 metadata_name
, ::trace_event_internal::kNoEventId
,
380 num_args
, &arg_name
, &arg_type
, &arg_value
, NULL
,
381 TRACE_EVENT_FLAG_NONE
);
class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() {
    thread_local_boolean_->Set(false);
  }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};
void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}
scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}
// A helper class that allows the lock to be acquired in the middle of the
// scope and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock& lock)
      : lock_(lock),
        locked_(false) {
  }

  ~OptionalAutoLock() {
    if (locked_)
      lock_.Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_.Acquire();
      locked_ = true;
    }
  }

 private:
  Lock& lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};
// Use this function instead of TraceEventHandle constructor to keep the
// overhead of ScopedTracer (trace_event.h) constructor minimum.
void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
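// A TraceEventHandle thus packs (chunk_seq, chunk_index, event_index), with
// both indices narrowed to 16 bits; GetEventByHandle() later rejects handles
// whose chunk_seq no longer matches, i.e. handles into recycled chunks.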
////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}
TraceEvent::TraceEvent()
    : duration_(TimeDelta::FromInternalValue(-1)),
      id_(0u),
      category_group_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    arg_names_[i] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));
}

TraceEvent::~TraceEvent() {
}
void TraceEvent::CopyFrom(const TraceEvent& other) {
  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  duration_ = other.duration_;
  id_ = other.id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];
    arg_values_[i] = other.arg_values_[i];
    convertable_values_[i] = other.convertable_values_[i];
  }
}
void TraceEvent::Initialize(
    int thread_id,
    TimeTicks timestamp,
    TimeTicks thread_timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  timestamp_ = timestamp;
  thread_timestamp_ = thread_timestamp;
  duration_ = TimeDelta::FromInternalValue(-1);
  id_ = id;
  category_group_enabled_ = category_group_enabled;
  name_ = name;
  thread_id_ = thread_id;
  phase_ = phase;
  flags_ = flags;

  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i] = convertable_values[i];
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i] = NULL;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }
  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;

    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (!arg_is_copy[i])
        continue;
      CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}
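// Note on Initialize() above: when TRACE_EVENT_FLAG_COPY is set,
// parameter_copy_storage_ holds the copied name, then each copied arg name,
// then each COPY_STRING arg value, packed back to back in one allocation.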
void TraceEvent::Reset() {
  // Only reset fields that won't be initialized in Initialize(), or that may
  // hold references to other objects.
  duration_ = TimeDelta::FromInternalValue(-1);
  parameter_copy_storage_ = NULL;
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    convertable_values_[i] = NULL;
}
void TraceEvent::UpdateDuration(const TimeTicks& now,
                                const TimeTicks& thread_now) {
  DCHECK(duration_.ToInternalValue() == -1);
  duration_ = now - timestamp_;
  thread_duration_ = thread_now - thread_timestamp_;
}
void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
                                   std::string* out) {
  switch (type) {
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
      break;
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
      break;
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
      break;
    case TRACE_VALUE_TYPE_DOUBLE: {
      // FIXME: base/json/json_writer.cc is using the same code,
      //        should be made into a common method.
      std::string real;
      double val = value.as_double;
      if (IsFinite(val)) {
        real = DoubleToString(val);
        // Ensure that the number has a .0 if there's no decimal or 'e'. This
        // makes sure that when we read the JSON back, it's interpreted as a
        // real rather than an int.
        if (real.find('.') == std::string::npos &&
            real.find('e') == std::string::npos &&
            real.find('E') == std::string::npos) {
          real.append(".0");
        }
        // The JSON spec requires that non-integer values in the range (-1,1)
        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
        if (real[0] == '.') {
          real.insert(0, "0");
        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
          // "-.1" bad "-0.1" good
          real.insert(1, "0");
        }
      } else if (IsNaN(val)) {
        // The JSON spec doesn't allow NaN and Infinity (since these are
        // objects in EcmaScript). Use strings instead.
        real = "\"NaN\"";
      } else if (val < 0) {
        real = "\"-Infinity\"";
      } else {
        real = "\"Infinity\"";
      }
      StringAppendF(out, "%s", real.c_str());
      break;
    }
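    // Examples of the double formatting above: 3.0 -> "3.0", 0.52 -> "0.52",
    // NaN -> "\"NaN\"", positive infinity -> "\"Infinity\"".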
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                                     reinterpret_cast<intptr_t>(
                                       value.as_pointer)));
      break;
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
      break;
    default:
      NOTREACHED() << "Don't know how to print this value";
      break;
  }
}
void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
      TraceLog::GetCategoryGroupName(category_group_enabled_),
      process_id,
      thread_id_,
      time_int64,
      phase_,
      name_);
  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
    else
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";
  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    int64 duration = duration_.ToInternalValue();
    if (duration != -1)
      StringAppendF(out, ",\"dur\":%" PRId64, duration);
    if (!thread_timestamp_.is_null()) {
      int64 thread_duration = thread_duration_.ToInternalValue();
      if (thread_duration != -1)
        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
    }
  }
  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
  }
  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));
  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;
      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;
      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}
void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  *out << "]";
  if (arg_names_[0]) {
    *out << ", {";
    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out << ", ";
      *out << arg_names_[i] << ":";
      std::string value_as_text;

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

      *out << value_as_text;
    }
    *out << "}";
  }
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////

TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}
void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}
TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////

class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};
// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  virtual ~TraceSamplingThread();

  // Implementation of PlatformThread::Delegate:
  virtual void ThreadMain() OVERRIDE;

  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  void Stop();
  void WaitSamplingEventForTesting();

 private:
  friend class TraceLog;

  void GetSamples();
  // Not thread-safe. Once the ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);

  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  CancellationFlag cancellation_flag_;
  WaitableEvent waitable_event_for_testing_;
};
TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(false, false) {
}

TraceSamplingThread::~TraceSamplingThread() {
}
895 PlatformThread::SetName("Sampling Thread");
896 thread_running_
= true;
897 const int kSamplingFrequencyMicroseconds
= 1000;
898 while (!cancellation_flag_
.IsSet()) {
899 PlatformThread::Sleep(
900 TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds
));
902 waitable_event_for_testing_
.Signal();
void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}
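// Each sampling bucket (see g_trace_state above) holds a pointer to a combined
// "category\0name" string written by the instrumented code; the sampling
// thread snapshots it here and emits a TRACE_EVENT_PHASE_SAMPLE event.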
void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}
void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  // Access to sample_buckets_ doesn't cause races with the sampling thread
  // that uses the sample_buckets_, because it is guaranteed that
  // RegisterSampleBucket is called before the sampling thread is created.
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}
void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
                                                 const char** name) {
  *category = combined;
  *name = &combined[strlen(combined) + 1];
}
void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();
}

void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();
}
TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceLog
//
////////////////////////////////////////////////////////////////////////////////

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  virtual ~ThreadLocalEventBuffer();

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TimeTicks& event_timestamp,
                      const TimeTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  virtual void WillDestroyCurrentMessageLoop() OVERRIDE;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int event_count_;
  TimeDelta overhead_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};
TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      event_count_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}
TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);

  // Zero event_count_ happens in either of the following cases:
  // - no event generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);
  }

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}
void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TimeTicks& event_timestamp,
    const TimeTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  CheckThisIsCurrentBuffer();

  event_count_++;
  TimeTicks thread_now = ThreadNow();
  TimeTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
    if (trace_event) {
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()),
          event_timestamp, event_thread_timestamp,
          TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
      trace_event->UpdateDuration(now, thread_now);
    }
  }
  overhead_ += overhead;
}
void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}
TraceLog::TraceLog()
    : mode_(DISABLED),
      num_traces_recorded_(0),
      event_callback_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      watch_category_(0),
      trace_options_(RECORD_UNTIL_FULL),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
      event_callback_category_filter_(
          CategoryFilter::kDefaultCategoryFilterString),
      thread_shared_chunk_index_(0),
      generation_(0) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(CategoryFilter(filter), RECORDING_MODE, ECHO_TO_CONSOLE);
  }
#endif

  logged_events_.reset(CreateTraceBuffer());
}
TraceLog::~TraceLog() {
}
const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}
const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}
void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
  g_category_group_enabled[category_index] = enabled_flag;
}
void TraceLog::UpdateCategoryGroupEnabledFlags() {
  for (int i = 0; i < g_category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}
void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
  ResetTraceEventSyntheticDelays();
  const CategoryFilter::StringList& delays =
      category_filter_.GetSyntheticDelayValues();
  CategoryFilter::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(
            TimeDelta::FromMicroseconds(target_duration * 1e6));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}
const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  AutoLock lock(lock_);

  unsigned char* category_group_enabled = NULL;
  // Search for pre-existing category group.
  for (int i = 0; i < g_category_index; i++) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      category_group_enabled = &g_category_group_enabled[i];
      break;
    }
  }

  if (!category_group_enabled) {
    // Create a new category group.
    DCHECK(g_category_index < MAX_CATEGORY_GROUPS) <<
        "must increase MAX_CATEGORY_GROUPS";
    if (g_category_index < MAX_CATEGORY_GROUPS) {
      int new_index = g_category_index++;
      // Don't hold on to the category_group pointer, so that we can create
      // category groups with strings not known at compile time (this is
      // required by SetWatchEvent).
      const char* new_group = strdup(category_group);
      ANNOTATE_LEAKING_OBJECT_PTR(new_group);
      g_category_groups[new_index] = new_group;
      DCHECK(!g_category_group_enabled[new_index]);
      // Note that if both included and excluded patterns in the
      // CategoryFilter are empty, we exclude nothing,
      // thereby enabling this category group.
      UpdateCategoryGroupEnabledFlag(new_index);
      category_group_enabled = &g_category_group_enabled[new_index];
    } else {
      category_group_enabled =
          &g_category_group_enabled[g_category_categories_exhausted];
    }
  }
  return category_group_enabled;
}
void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  category_groups->push_back(
      g_category_groups[g_category_trace_event_overhead]);
  for (int i = g_num_builtin_categories; i < g_category_index; i++)
    category_groups->push_back(g_category_groups[i]);
}
void TraceLog::SetEnabled(const CategoryFilter& category_filter,
                          Mode mode,
                          Options options) {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_message_loop_proxy_.get());

    Options old_options = trace_options();

    if (IsEnabled()) {
      if (options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";
      }

      if (mode != mode_) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
      }

      category_filter_.Merge(category_filter);
      UpdateCategoryGroupEnabledFlags();
      return;
    }

    if (dispatching_to_observer_list_) {
      DLOG(ERROR) <<
          "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    mode_ = mode;

    if (options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    category_filter_ = CategoryFilter(category_filter);
    UpdateCategoryGroupEnabledFlags();
    UpdateSyntheticDelaysFromCategoryFilter();

    if (options & ENABLE_SAMPLING) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[0],
          "bucket0",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[1],
          "bucket1",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2],
          "bucket2",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(
              0, sampling_thread_.get(), &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}
CategoryFilter TraceLog::GetCurrentCategoryFilter() {
  AutoLock lock(lock_);
  return category_filter_;
}
void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();
}
void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  mode_ = DISABLED;

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
  }

  category_filter_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;

  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (size_t i = 0; i < observer_list.size(); ++i)
      observer_list[i]->OnTraceLogDisabled();
  }
  dispatching_to_observer_list_ = false;
}
int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  return num_traces_recorded_;
}
void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}
void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}
bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  return it != enabled_state_observer_list_.end();
}
float TraceLog::GetBufferPercentFull() const {
  AutoLock lock(lock_);
  return static_cast<float>(static_cast<double>(logged_events_->Size()) /
                            logged_events_->Capacity());
}
bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}
TraceBuffer* TraceLog::CreateTraceBuffer() {
  Options options = trace_options();
  if (options & RECORD_CONTINUOUSLY)
    return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
  else if ((options & ENABLE_SAMPLING) && mode_ == MONITORING_MODE)
    return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
  else if (options & ECHO_TO_CONSOLE)
    return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
  return new TraceBufferVector();
}
TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle, bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ = logged_events_->GetChunk(
        &thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}
void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull())
    SetDisabledWhileLocked();
}
void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
                                       EventCallback cb) {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_category_filter_ = category_filter;
  UpdateCategoryGroupEnabledFlags();
}
void TraceLog::SetEventCallbackDisabled() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_, 0);
  UpdateCategoryGroupEnabledFlags();
}
// Flush() works as follows:
// 1. Flush() is called in threadA whose message loop is saved in
//    flush_message_loop_proxy_;
// 2. If thread_message_loops_ is not empty, threadA posts task to each message
//    loop to flush the thread local buffers; otherwise finish the flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main buffer;
//    - The message loop will be removed from thread_message_loops_;
//      If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
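// (Steps 3 and 4 above are implemented by FlushCurrentThread() and
// OnFlushTimeout() further below; both paths end in FinishFlush().)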
void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int generation = this->generation();
  {
    AutoLock lock(lock_);
    DCHECK(!flush_message_loop_proxy_.get());
    flush_message_loop_proxy_ = MessageLoopProxy::current();
    DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        (*it)->PostTask(
            FROM_HERE,
            Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
      }
      flush_message_loop_proxy_->PostDelayedTask(
          FROM_HERE,
          Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
          TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
      return;
    }
  }

  FinishFlush(generation);
}
void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const TraceLog::OutputCallback& flush_output_callback) {
  if (flush_output_callback.is_null())
    return;

  // The callback needs to be called at least once even if there are no events
  // to let the caller know the completion of flush.
  bool has_more_events = true;
  do {
    scoped_refptr<RefCountedString> json_events_str_ptr =
        new RefCountedString();

    for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
      const TraceBufferChunk* chunk = logged_events->NextChunk();
      if (!chunk) {
        has_more_events = false;
        break;
      }
      for (size_t j = 0; j < chunk->size(); ++j) {
        if (i > 0 || j > 0)
          json_events_str_ptr->data().append(",");
        chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
      }
    }

    flush_output_callback.Run(json_events_str_ptr, has_more_events);
  } while (has_more_events);
}
void TraceLog::FinishFlush(int generation) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_message_loop_proxy_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}
// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_message_loop_proxy_ ||
      thread_message_loops_.size())
    return;

  flush_message_loop_proxy_->PostTask(
      FROM_HERE,
      Bind(&TraceLog::FinishFlush, Unretained(this), generation));
}
void TraceLog::OnFlushTimeout(int generation) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
      // Flush has finished before timeout.
      return;
    }

    LOG(WARNING) <<
        "The following threads haven't finished flush in time. "
        "If this happens stably for some thread, please call "
        "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
        "the thread to avoid its trace events from being lost.";
    for (hash_set<MessageLoop*>::const_iterator it =
             thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  FinishFlush(generation);
}
void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  {
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}
void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}
TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                               name, id, thread_id, now,
                                               num_args, arg_names,
                                               arg_types, arg_values,
                                               convertable_values, flags);
}
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

  TimeTicks now = OffsetTimestamp(timestamp);
  TimeTicks thread_now = ThreadNow();
  ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or whose message loop may be blocked,
  // the trace events will be added into the main buffer directly.
  if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
    thread_local_event_buffer = thread_local_event_buffer_.Get();
    if (thread_local_event_buffer &&
        !CheckGeneration(thread_local_event_buffer->generation())) {
      delete thread_local_event_buffer;
      thread_local_event_buffer = NULL;
    }
    if (!thread_local_event_buffer) {
      thread_local_event_buffer = new ThreadLocalEventBuffer(this);
      thread_local_event_buffer_.Set(thread_local_event_buffer);
    }
  }
  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name = ThreadIdNameManager::GetInstance()->
        GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }
  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id, now, thread_now, phase,
                              category_group_enabled, name, id,
                              num_args, arg_names, arg_types, arg_values,
                              convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }
1853 if (trace_options() & ECHO_TO_CONSOLE
) {
1854 console_message
= EventToConsoleMessage(
1855 phase
== TRACE_EVENT_PHASE_COMPLETE
? TRACE_EVENT_PHASE_BEGIN
: phase
,
1856 timestamp
, trace_event
);
1860 if (console_message
.size())
1861 LOG(ERROR
) << console_message
;
1863 if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
1864 &watch_category_
)) == category_group_enabled
) {
1865 bool event_name_matches
;
1866 WatchEventCallback watch_event_callback_copy
;
1868 AutoLock
lock(lock_
);
1869 event_name_matches
= watch_event_name_
== name
;
1870 watch_event_callback_copy
= watch_event_callback_
;
1872 if (event_name_matches
) {
1873 if (!watch_event_callback_copy
.is_null())
1874 watch_event_callback_copy
.Run();
1878 if (*category_group_enabled
& ENABLED_FOR_EVENT_CALLBACK
) {
1879 EventCallback event_callback
= reinterpret_cast<EventCallback
>(
1880 subtle::NoBarrier_Load(&event_callback_
));
1881 if (event_callback
) {
1883 phase
== TRACE_EVENT_PHASE_COMPLETE
?
1884 TRACE_EVENT_PHASE_BEGIN
: phase
,
1885 category_group_enabled
, name
, id
,
1886 num_args
, arg_names
, arg_types
, arg_values
,
1891 if (thread_local_event_buffer
)
1892 thread_local_event_buffer
->ReportOverhead(now
, thread_now
);
// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id = trace_event ?
      trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm",
                            thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
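
// For reference, EventToConsoleMessage above produces one colored, indented
// line per event. An END event nested one level deep on a thread named
// "CrBrowserMain" would look roughly like (illustrative, color escapes
// omitted, exact event text comes from AppendPrettyPrinted):
//   CrBrowserMain: | <pretty-printed event> (2.345 ms)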
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}
void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
  // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  TimeTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & ECHO_TO_CONSOLE) {
      console_message =
          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
                     TRACE_EVENT_FLAG_NONE);
    }
  }
}
void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}
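
// Illustrative use of the watch-event API above (e.g. from a test); the
// category, event name and callback below are hypothetical:
//   TraceLog::GetInstance()->SetWatchEvent(
//       "cat", "event", base::Bind(&OnWatchEventFired));
//   ...
//   TraceLog::GetInstance()->CancelWatchEvent();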
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_sort_index", "sort_index",
                            process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); it++) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_labels", "labels",
                            JoinString(labels, ','));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); it++) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); it++) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_name", "name",
                            it->second);
  }
}
void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer under the control of the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq() ?
        thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}
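
// Note on the lookup above: a TraceEventHandle identifies an event by
// (chunk_seq, chunk_index, event_index). A zero chunk_seq marks an
// empty/invalid handle, which is why the function bails out early; otherwise
// the thread local buffer is tried first, then the shared chunk, then the
// main buffer, taking the lock only when leaving thread-local storage.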
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}
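
// Note: the single FNV-1 round above only seeds process_id_hash_. The hash is
// intended for scrambling event ids per-process with a cheap XOR, roughly
// (illustrative, not a quote of the actual call site):
//   unsigned long long mangled_id = id ^ process_id_hash_;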
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it =
      process_labels_.find(label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
    thread_local_event_buffer_.Set(NULL);
  }
}
bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
    const std::string& str) {
  return str.empty() ||
         str.at(0) == ' ' ||
         str.at(str.length() - 1) == ' ';
}

bool CategoryFilter::DoesCategoryGroupContainCategory(
    const char* category_group,
    const char* category) const {
  DCHECK(category);
  CStringTokenizer category_group_tokens(
      category_group, category_group + strlen(category_group), ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
        category_group_token))
        << "Disallowed category string";
    if (MatchPattern(category_group_token.c_str(), category))
      return true;
  }
  return false;
}
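
// Example for DoesCategoryGroupContainCategory above (illustrative category
// names): the group "cat1,cat2" contains the filter pattern "cat*" because
// MatchPattern supports '*' wildcards and the token "cat1" matches it; the
// same group does not contain "gpu".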
CategoryFilter::CategoryFilter(const std::string& filter_string) {
  if (!filter_string.empty())
    Initialize(filter_string);
  else
    Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_),
      delays_(cf.delays_) {
}

CategoryFilter::~CategoryFilter() {
}

CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  delays_ = rhs.delays_;
  return *this;
}
void CategoryFilter::Initialize(const std::string& filter_string) {
  // Tokenize list of categories, delimited by ','.
  StringTokenizer tokens(filter_string, ",");
  // Add each token to the appropriate list (included_, excluded_).
  while (tokens.GetNext()) {
    std::string category = tokens.token();
    // Ignore empty categories.
    if (category.empty())
      continue;
    // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
    if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
        category.at(category.size() - 1) == ')') {
      category = category.substr(
          strlen(kSyntheticDelayCategoryFilterPrefix),
          category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
      size_t name_length = category.find(';');
      if (name_length != std::string::npos && name_length > 0 &&
          name_length != category.size() - 1) {
        delays_.push_back(category);
      }
    } else if (category.at(0) == '-') {
      // Excluded categories start with '-'.
      // Remove '-' from category string.
      category = category.substr(1);
      excluded_.push_back(category);
    } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                TRACE_DISABLED_BY_DEFAULT("")) == 0) {
      disabled_.push_back(category);
    } else {
      included_.push_back(category);
    }
  }
}
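
// Examples of filter strings accepted by Initialize above (illustrative):
//   "cat1,cat2"                      - include cat1 and cat2
//   "cat1,-cat2"                     - include cat1, exclude cat2
//   "disabled-by-default-gpu.debug"  - opt into a disabled-by-default category
//   "DELAY(gpu.PresentingFrame;16)"  - a synthetic delay entry; the text
//                                      between the parentheses is kept in
//                                      delays_ and parsed elsewhere.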
void CategoryFilter::WriteString(const StringList& values,
                                 std::string* out,
                                 bool included) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = values.begin();
       ci != values.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
    ++token_cnt;
  }
}

void CategoryFilter::WriteString(const StringList& delays,
                                 std::string* out) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = delays.begin();
       ci != delays.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
                  ci->c_str());
    ++token_cnt;
  }
}

std::string CategoryFilter::ToString() const {
  std::string filter_string;
  WriteString(included_, &filter_string, true);
  WriteString(disabled_, &filter_string, true);
  WriteString(excluded_, &filter_string, false);
  WriteString(delays_, &filter_string);
  return filter_string;
}
bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as part of enabling/disabling
  // categories.
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include the disabled.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }
  if (DoesCategoryGroupContainCategory(category_group_name,
                                       TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }

  for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return false;
  }
  // If the category group is not excluded, and there are no included patterns,
  // we consider this category group enabled.
  return included_.empty();
}
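
// Worked example for IsCategoryGroupEnabled above (illustrative category
// names): with the filter string "-ipc", excluded_ is {"ipc"} and included_
// is empty, so the group "ipc,toplevel" hits the excluded_ loop and is
// disabled, while "toplevel" matches nothing and falls through to the final
// included_.empty() check, which enables it.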
bool CategoryFilter::HasIncludedPatterns() const {
  return !included_.empty();
}

void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
  // Keep included patterns only if both filters have an included entry.
  // Otherwise, one of the filters was specifying "*" and we want to honour
  // the broadest filter.
  if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
    included_.insert(included_.end(),
                     nested_filter.included_.begin(),
                     nested_filter.included_.end());
  } else {
    included_.clear();
  }

  disabled_.insert(disabled_.end(),
                   nested_filter.disabled_.begin(),
                   nested_filter.disabled_.end());
  excluded_.insert(excluded_.end(),
                   nested_filter.excluded_.begin(),
                   nested_filter.excluded_.end());
  delays_.insert(delays_.end(),
                 nested_filter.delays_.begin(),
                 nested_filter.delays_.end());
}
void CategoryFilter::Clear() {
  included_.clear();
  disabled_.clear();
  excluded_.clear();
  delays_.clear();
}

const CategoryFilter::StringList&
    CategoryFilter::GetSyntheticDelayValues() const {
  return delays_;
}

}  // namespace debug
}  // namespace base

namespace trace_event_internal {
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group, const char* name) {
  // The single atom works because for now the category_group can only be
  // "gpu".
  DCHECK(strcmp(category_group, "gpu") == 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kNoEventId,
            static_cast<int>(base::PlatformThread::CurrentId()),
            base::TimeTicks::NowFromSystemTraceTime(),
            0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
  }
}
ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}
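
// Usage note for the scoper above (illustrative): it backs the
// binary-efficient trace macros, e.g. TRACE_EVENT_BINARY_EFFICIENT0("gpu",
// "SwapBuffers"), opening a COMPLETE event in the constructor and recording
// its duration in the destructor when the category is enabled.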
}  // namespace trace_event_internal