1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/debug/trace_event_impl.h"
9 #include "base/base_switches.h"
10 #include "base/bind.h"
11 #include "base/command_line.h"
12 #include "base/debug/leak_annotations.h"
13 #include "base/debug/trace_event.h"
14 #include "base/debug/trace_event_synthetic_delay.h"
15 #include "base/float_util.h"
16 #include "base/format_macros.h"
17 #include "base/json/string_escape.h"
18 #include "base/lazy_instance.h"
19 #include "base/memory/singleton.h"
20 #include "base/message_loop/message_loop.h"
21 #include "base/process/process_metrics.h"
22 #include "base/stl_util.h"
23 #include "base/strings/string_number_conversions.h"
24 #include "base/strings/string_split.h"
25 #include "base/strings/string_tokenizer.h"
26 #include "base/strings/string_util.h"
27 #include "base/strings/stringprintf.h"
28 #include "base/strings/utf_string_conversions.h"
29 #include "base/synchronization/cancellation_flag.h"
30 #include "base/synchronization/waitable_event.h"
31 #include "base/sys_info.h"
32 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
33 #include "base/threading/platform_thread.h"
34 #include "base/threading/thread_id_name_manager.h"
35 #include "base/time/time.h"
38 #include "base/debug/trace_event_win.h"
41 class DeleteTraceLogForTesting
{
43 static void Delete() {
44 Singleton
<base::debug::TraceLog
,
45 LeakySingletonTraits
<base::debug::TraceLog
> >::OnExit(0);
49 // The thread buckets for the sampling profiler.
50 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state
[3];
57 // The overhead of TraceEvent above this threshold will be reported in the
59 const int kOverheadReportThresholdInMicroseconds
= 50;
61 // Controls the number of trace events we will buffer in-memory
62 // before throwing them away.
63 const size_t kTraceBufferChunkSize
= TraceBufferChunk::kTraceBufferChunkSize
;
64 const size_t kTraceEventVectorBufferChunks
= 256000 / kTraceBufferChunkSize
;
65 const size_t kTraceEventRingBufferChunks
= kTraceEventVectorBufferChunks
/ 4;
66 const size_t kTraceEventBatchChunks
= 1000 / kTraceBufferChunkSize
;
67 // Can store results for 30 seconds with 1 ms sampling interval.
68 const size_t kMonitorTraceEventBufferChunks
= 30000 / kTraceBufferChunkSize
;
69 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
70 const size_t kEchoToConsoleTraceEventBufferChunks
= 256;
72 const int kThreadFlushTimeoutMs
= 3000;
75 // These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
76 const char kEchoToConsoleCategoryFilter
[] = "-ipc,-task";
79 const char kSyntheticDelayCategoryFilterPrefix
[] = "DELAY(";
81 #define MAX_CATEGORY_GROUPS 100
83 // Parallel arrays g_category_groups and g_category_group_enabled are separate
84 // so that a pointer to a member of g_category_group_enabled can be easily
85 // converted to an index into g_category_groups. This allows macros to deal
86 // only with char enabled pointers from g_category_group_enabled, and we can
87 // convert internally to determine the category name from the char enabled
89 const char* g_category_groups
[MAX_CATEGORY_GROUPS
] = {
91 "tracing already shutdown",
92 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
94 // For reporting trace_event overhead. For thread local event buffers only.
95 "trace_event_overhead"};
97 // The enabled flag is char instead of bool so that the API can be used from C.
98 unsigned char g_category_group_enabled
[MAX_CATEGORY_GROUPS
] = { 0 };
99 // Indexes here have to match the g_category_groups array indexes above.
100 const int g_category_already_shutdown
= 1;
101 const int g_category_categories_exhausted
= 2;
102 const int g_category_metadata
= 3;
103 const int g_category_trace_event_overhead
= 4;
104 const int g_num_builtin_categories
= 5;
105 // Skip default categories.
106 base::subtle::AtomicWord g_category_index
= g_num_builtin_categories
;
108 // The name of the current thread. This is used to decide if the current
109 // thread name has changed. We combine all the seen thread names into the
110 // output name for the thread.
111 LazyInstance
<ThreadLocalPointer
<const char> >::Leaky
112 g_current_thread_name
= LAZY_INSTANCE_INITIALIZER
;
114 TimeTicks
ThreadNow() {
115 return TimeTicks::IsThreadNowSupported() ?
116 TimeTicks::ThreadNow() : TimeTicks();
119 class TraceBufferRingBuffer
: public TraceBuffer
{
121 TraceBufferRingBuffer(size_t max_chunks
)
122 : max_chunks_(max_chunks
),
123 recyclable_chunks_queue_(new size_t[queue_capacity()]),
125 queue_tail_(max_chunks
),
126 current_iteration_index_(0),
127 current_chunk_seq_(1) {
128 chunks_
.reserve(max_chunks
);
129 for (size_t i
= 0; i
< max_chunks
; ++i
)
130 recyclable_chunks_queue_
[i
] = i
;
133 virtual scoped_ptr
<TraceBufferChunk
> GetChunk(size_t* index
) OVERRIDE
{
134 // Because the number of threads is much less than the number of chunks,
135 // the queue should never be empty.
136 DCHECK(!QueueIsEmpty());
138 *index
= recyclable_chunks_queue_
[queue_head_
];
139 queue_head_
= NextQueueIndex(queue_head_
);
140 current_iteration_index_
= queue_head_
;
142 if (*index
>= chunks_
.size())
143 chunks_
.resize(*index
+ 1);
145 TraceBufferChunk
* chunk
= chunks_
[*index
];
146 chunks_
[*index
] = NULL
; // Put NULL in the slot of a in-flight chunk.
148 chunk
->Reset(current_chunk_seq_
++);
150 chunk
= new TraceBufferChunk(current_chunk_seq_
++);
152 return scoped_ptr
<TraceBufferChunk
>(chunk
);
155 virtual void ReturnChunk(size_t index
,
156 scoped_ptr
<TraceBufferChunk
> chunk
) OVERRIDE
{
157 // When this method is called, the queue should not be full because it
158 // can contain all chunks including the one to be returned.
159 DCHECK(!QueueIsFull());
161 DCHECK_LT(index
, chunks_
.size());
162 DCHECK(!chunks_
[index
]);
163 chunks_
[index
] = chunk
.release();
164 recyclable_chunks_queue_
[queue_tail_
] = index
;
165 queue_tail_
= NextQueueIndex(queue_tail_
);
168 virtual bool IsFull() const OVERRIDE
{
172 virtual size_t Size() const OVERRIDE
{
173 // This is approximate because not all of the chunks are full.
174 return chunks_
.size() * kTraceBufferChunkSize
;
177 virtual size_t Capacity() const OVERRIDE
{
178 return max_chunks_
* kTraceBufferChunkSize
;
181 virtual TraceEvent
* GetEventByHandle(TraceEventHandle handle
) OVERRIDE
{
182 if (handle
.chunk_index
>= chunks_
.size())
184 TraceBufferChunk
* chunk
= chunks_
[handle
.chunk_index
];
185 if (!chunk
|| chunk
->seq() != handle
.chunk_seq
)
187 return chunk
->GetEventAt(handle
.event_index
);
190 virtual const TraceBufferChunk
* NextChunk() OVERRIDE
{
194 while (current_iteration_index_
!= queue_tail_
) {
195 size_t chunk_index
= recyclable_chunks_queue_
[current_iteration_index_
];
196 current_iteration_index_
= NextQueueIndex(current_iteration_index_
);
197 if (chunk_index
>= chunks_
.size()) // Skip uninitialized chunks.
199 DCHECK(chunks_
[chunk_index
]);
200 return chunks_
[chunk_index
];
205 virtual scoped_ptr
<TraceBuffer
> CloneForIteration() const OVERRIDE
{
206 scoped_ptr
<ClonedTraceBuffer
> cloned_buffer(new ClonedTraceBuffer());
207 for (size_t queue_index
= queue_head_
; queue_index
!= queue_tail_
;
208 queue_index
= NextQueueIndex(queue_index
)) {
209 size_t chunk_index
= recyclable_chunks_queue_
[queue_index
];
210 if (chunk_index
>= chunks_
.size()) // Skip uninitialized chunks.
212 TraceBufferChunk
* chunk
= chunks_
[chunk_index
];
213 cloned_buffer
->chunks_
.push_back(chunk
? chunk
->Clone().release() : NULL
);
215 return cloned_buffer
.PassAs
<TraceBuffer
>();
219 class ClonedTraceBuffer
: public TraceBuffer
{
221 ClonedTraceBuffer() : current_iteration_index_(0) {}
223 // The only implemented method.
224 virtual const TraceBufferChunk
* NextChunk() OVERRIDE
{
225 return current_iteration_index_
< chunks_
.size() ?
226 chunks_
[current_iteration_index_
++] : NULL
;
229 virtual scoped_ptr
<TraceBufferChunk
> GetChunk(size_t* index
) OVERRIDE
{
231 return scoped_ptr
<TraceBufferChunk
>();
233 virtual void ReturnChunk(size_t index
,
234 scoped_ptr
<TraceBufferChunk
>) OVERRIDE
{
237 virtual bool IsFull() const OVERRIDE
{ return false; }
238 virtual size_t Size() const OVERRIDE
{ return 0; }
239 virtual size_t Capacity() const OVERRIDE
{ return 0; }
240 virtual TraceEvent
* GetEventByHandle(TraceEventHandle handle
) OVERRIDE
{
243 virtual scoped_ptr
<TraceBuffer
> CloneForIteration() const OVERRIDE
{
245 return scoped_ptr
<TraceBuffer
>();
248 size_t current_iteration_index_
;
249 ScopedVector
<TraceBufferChunk
> chunks_
;
252 bool QueueIsEmpty() const {
253 return queue_head_
== queue_tail_
;
256 size_t QueueSize() const {
257 return queue_tail_
> queue_head_
? queue_tail_
- queue_head_
:
258 queue_tail_
+ queue_capacity() - queue_head_
;
261 bool QueueIsFull() const {
262 return QueueSize() == queue_capacity() - 1;
265 size_t queue_capacity() const {
266 // One extra space to help distinguish full state and empty state.
267 return max_chunks_
+ 1;
270 size_t NextQueueIndex(size_t index
) const {
272 if (index
>= queue_capacity())
278 ScopedVector
<TraceBufferChunk
> chunks_
;
280 scoped_ptr
<size_t[]> recyclable_chunks_queue_
;
284 size_t current_iteration_index_
;
285 uint32 current_chunk_seq_
;
287 DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer
);
290 class TraceBufferVector
: public TraceBuffer
{
293 : in_flight_chunk_count_(0),
294 current_iteration_index_(0) {
295 chunks_
.reserve(kTraceEventVectorBufferChunks
);
298 virtual scoped_ptr
<TraceBufferChunk
> GetChunk(size_t* index
) OVERRIDE
{
299 // This function may be called when adding normal events or indirectly from
300 // AddMetadataEventsWhileLocked(). We can not DECHECK(!IsFull()) because we
301 // have to add the metadata events and flush thread-local buffers even if
302 // the buffer is full.
303 *index
= chunks_
.size();
304 chunks_
.push_back(NULL
); // Put NULL in the slot of a in-flight chunk.
305 ++in_flight_chunk_count_
;
306 // + 1 because zero chunk_seq is not allowed.
307 return scoped_ptr
<TraceBufferChunk
>(
308 new TraceBufferChunk(static_cast<uint32
>(*index
) + 1));
311 virtual void ReturnChunk(size_t index
,
312 scoped_ptr
<TraceBufferChunk
> chunk
) OVERRIDE
{
313 DCHECK_GT(in_flight_chunk_count_
, 0u);
314 DCHECK_LT(index
, chunks_
.size());
315 DCHECK(!chunks_
[index
]);
316 --in_flight_chunk_count_
;
317 chunks_
[index
] = chunk
.release();
320 virtual bool IsFull() const OVERRIDE
{
321 return chunks_
.size() >= kTraceEventVectorBufferChunks
;
324 virtual size_t Size() const OVERRIDE
{
325 // This is approximate because not all of the chunks are full.
326 return chunks_
.size() * kTraceBufferChunkSize
;
329 virtual size_t Capacity() const OVERRIDE
{
330 return kTraceEventVectorBufferChunks
* kTraceBufferChunkSize
;
333 virtual TraceEvent
* GetEventByHandle(TraceEventHandle handle
) OVERRIDE
{
334 if (handle
.chunk_index
>= chunks_
.size())
336 TraceBufferChunk
* chunk
= chunks_
[handle
.chunk_index
];
337 if (!chunk
|| chunk
->seq() != handle
.chunk_seq
)
339 return chunk
->GetEventAt(handle
.event_index
);
342 virtual const TraceBufferChunk
* NextChunk() OVERRIDE
{
343 while (current_iteration_index_
< chunks_
.size()) {
344 // Skip in-flight chunks.
345 const TraceBufferChunk
* chunk
= chunks_
[current_iteration_index_
++];
352 virtual scoped_ptr
<TraceBuffer
> CloneForIteration() const OVERRIDE
{
354 return scoped_ptr
<TraceBuffer
>();
358 size_t in_flight_chunk_count_
;
359 size_t current_iteration_index_
;
360 ScopedVector
<TraceBufferChunk
> chunks_
;
362 DISALLOW_COPY_AND_ASSIGN(TraceBufferVector
);
365 template <typename T
>
366 void InitializeMetadataEvent(TraceEvent
* trace_event
,
368 const char* metadata_name
, const char* arg_name
,
374 unsigned char arg_type
;
375 unsigned long long arg_value
;
376 ::trace_event_internal::SetTraceValue(value
, &arg_type
, &arg_value
);
377 trace_event
->Initialize(thread_id
,
378 TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA
,
379 &g_category_group_enabled
[g_category_metadata
],
380 metadata_name
, ::trace_event_internal::kNoEventId
,
381 num_args
, &arg_name
, &arg_type
, &arg_value
, NULL
,
382 TRACE_EVENT_FLAG_NONE
);
385 class AutoThreadLocalBoolean
{
387 explicit AutoThreadLocalBoolean(ThreadLocalBoolean
* thread_local_boolean
)
388 : thread_local_boolean_(thread_local_boolean
) {
389 DCHECK(!thread_local_boolean_
->Get());
390 thread_local_boolean_
->Set(true);
392 ~AutoThreadLocalBoolean() {
393 thread_local_boolean_
->Set(false);
397 ThreadLocalBoolean
* thread_local_boolean_
;
398 DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean
);
403 void TraceBufferChunk::Reset(uint32 new_seq
) {
404 for (size_t i
= 0; i
< next_free_
; ++i
)
410 TraceEvent
* TraceBufferChunk::AddTraceEvent(size_t* event_index
) {
412 *event_index
= next_free_
++;
413 return &chunk_
[*event_index
];
416 scoped_ptr
<TraceBufferChunk
> TraceBufferChunk::Clone() const {
417 scoped_ptr
<TraceBufferChunk
> cloned_chunk(new TraceBufferChunk(seq_
));
418 cloned_chunk
->next_free_
= next_free_
;
419 for (size_t i
= 0; i
< next_free_
; ++i
)
420 cloned_chunk
->chunk_
[i
].CopyFrom(chunk_
[i
]);
421 return cloned_chunk
.Pass();
424 // A helper class that allows the lock to be acquired in the middle of the scope
425 // and unlocks at the end of scope if locked.
426 class TraceLog::OptionalAutoLock
{
428 explicit OptionalAutoLock(Lock
& lock
)
433 ~OptionalAutoLock() {
438 void EnsureAcquired() {
448 DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock
);
451 // Use this function instead of TraceEventHandle constructor to keep the
452 // overhead of ScopedTracer (trace_event.h) constructor minimum.
453 void MakeHandle(uint32 chunk_seq
, size_t chunk_index
, size_t event_index
,
454 TraceEventHandle
* handle
) {
456 DCHECK(chunk_index
< (1u << 16));
457 DCHECK(event_index
< (1u << 16));
458 handle
->chunk_seq
= chunk_seq
;
459 handle
->chunk_index
= static_cast<uint16
>(chunk_index
);
460 handle
->event_index
= static_cast<uint16
>(event_index
);
463 ////////////////////////////////////////////////////////////////////////////////
467 ////////////////////////////////////////////////////////////////////////////////
471 size_t GetAllocLength(const char* str
) { return str
? strlen(str
) + 1 : 0; }
473 // Copies |*member| into |*buffer|, sets |*member| to point to this new
474 // location, and then advances |*buffer| by the amount written.
475 void CopyTraceEventParameter(char** buffer
,
479 size_t written
= strlcpy(*buffer
, *member
, end
- *buffer
) + 1;
480 DCHECK_LE(static_cast<int>(written
), end
- *buffer
);
488 TraceEvent::TraceEvent()
489 : duration_(TimeDelta::FromInternalValue(-1)),
491 category_group_enabled_(NULL
),
494 phase_(TRACE_EVENT_PHASE_BEGIN
),
496 for (int i
= 0; i
< kTraceMaxNumArgs
; ++i
)
497 arg_names_
[i
] = NULL
;
498 memset(arg_values_
, 0, sizeof(arg_values_
));
501 TraceEvent::~TraceEvent() {
504 void TraceEvent::CopyFrom(const TraceEvent
& other
) {
505 timestamp_
= other
.timestamp_
;
506 thread_timestamp_
= other
.thread_timestamp_
;
507 duration_
= other
.duration_
;
509 category_group_enabled_
= other
.category_group_enabled_
;
511 thread_id_
= other
.thread_id_
;
512 phase_
= other
.phase_
;
513 flags_
= other
.flags_
;
514 parameter_copy_storage_
= other
.parameter_copy_storage_
;
516 for (int i
= 0; i
< kTraceMaxNumArgs
; ++i
) {
517 arg_names_
[i
] = other
.arg_names_
[i
];
518 arg_types_
[i
] = other
.arg_types_
[i
];
519 arg_values_
[i
] = other
.arg_values_
[i
];
520 convertable_values_
[i
] = other
.convertable_values_
[i
];
524 void TraceEvent::Initialize(
527 TimeTicks thread_timestamp
,
529 const unsigned char* category_group_enabled
,
531 unsigned long long id
,
533 const char** arg_names
,
534 const unsigned char* arg_types
,
535 const unsigned long long* arg_values
,
536 const scoped_refptr
<ConvertableToTraceFormat
>* convertable_values
,
537 unsigned char flags
) {
538 timestamp_
= timestamp
;
539 thread_timestamp_
= thread_timestamp
;
540 duration_
= TimeDelta::FromInternalValue(-1);
542 category_group_enabled_
= category_group_enabled
;
544 thread_id_
= thread_id
;
548 // Clamp num_args since it may have been set by a third_party library.
549 num_args
= (num_args
> kTraceMaxNumArgs
) ? kTraceMaxNumArgs
: num_args
;
551 for (; i
< num_args
; ++i
) {
552 arg_names_
[i
] = arg_names
[i
];
553 arg_types_
[i
] = arg_types
[i
];
555 if (arg_types
[i
] == TRACE_VALUE_TYPE_CONVERTABLE
)
556 convertable_values_
[i
] = convertable_values
[i
];
558 arg_values_
[i
].as_uint
= arg_values
[i
];
560 for (; i
< kTraceMaxNumArgs
; ++i
) {
561 arg_names_
[i
] = NULL
;
562 arg_values_
[i
].as_uint
= 0u;
563 convertable_values_
[i
] = NULL
;
564 arg_types_
[i
] = TRACE_VALUE_TYPE_UINT
;
567 bool copy
= !!(flags
& TRACE_EVENT_FLAG_COPY
);
568 size_t alloc_size
= 0;
570 alloc_size
+= GetAllocLength(name
);
571 for (i
= 0; i
< num_args
; ++i
) {
572 alloc_size
+= GetAllocLength(arg_names_
[i
]);
573 if (arg_types_
[i
] == TRACE_VALUE_TYPE_STRING
)
574 arg_types_
[i
] = TRACE_VALUE_TYPE_COPY_STRING
;
578 bool arg_is_copy
[kTraceMaxNumArgs
];
579 for (i
= 0; i
< num_args
; ++i
) {
580 // No copying of convertable types, we retain ownership.
581 if (arg_types_
[i
] == TRACE_VALUE_TYPE_CONVERTABLE
)
584 // We only take a copy of arg_vals if they are of type COPY_STRING.
585 arg_is_copy
[i
] = (arg_types_
[i
] == TRACE_VALUE_TYPE_COPY_STRING
);
587 alloc_size
+= GetAllocLength(arg_values_
[i
].as_string
);
591 parameter_copy_storage_
= new RefCountedString
;
592 parameter_copy_storage_
->data().resize(alloc_size
);
593 char* ptr
= string_as_array(¶meter_copy_storage_
->data());
594 const char* end
= ptr
+ alloc_size
;
596 CopyTraceEventParameter(&ptr
, &name_
, end
);
597 for (i
= 0; i
< num_args
; ++i
) {
598 CopyTraceEventParameter(&ptr
, &arg_names_
[i
], end
);
601 for (i
= 0; i
< num_args
; ++i
) {
602 if (arg_types_
[i
] == TRACE_VALUE_TYPE_CONVERTABLE
)
605 CopyTraceEventParameter(&ptr
, &arg_values_
[i
].as_string
, end
);
607 DCHECK_EQ(end
, ptr
) << "Overrun by " << ptr
- end
;
611 void TraceEvent::Reset() {
612 // Only reset fields that won't be initialized in Initialize(), or that may
613 // hold references to other objects.
614 duration_
= TimeDelta::FromInternalValue(-1);
615 parameter_copy_storage_
= NULL
;
616 for (int i
= 0; i
< kTraceMaxNumArgs
; ++i
)
617 convertable_values_
[i
] = NULL
;
620 void TraceEvent::UpdateDuration(const TimeTicks
& now
,
621 const TimeTicks
& thread_now
) {
622 DCHECK(duration_
.ToInternalValue() == -1);
623 duration_
= now
- timestamp_
;
624 thread_duration_
= thread_now
- thread_timestamp_
;
628 void TraceEvent::AppendValueAsJSON(unsigned char type
,
629 TraceEvent::TraceValue value
,
632 case TRACE_VALUE_TYPE_BOOL
:
633 *out
+= value
.as_bool
? "true" : "false";
635 case TRACE_VALUE_TYPE_UINT
:
636 StringAppendF(out
, "%" PRIu64
, static_cast<uint64
>(value
.as_uint
));
638 case TRACE_VALUE_TYPE_INT
:
639 StringAppendF(out
, "%" PRId64
, static_cast<int64
>(value
.as_int
));
641 case TRACE_VALUE_TYPE_DOUBLE
: {
642 // FIXME: base/json/json_writer.cc is using the same code,
643 // should be made into a common method.
645 double val
= value
.as_double
;
647 real
= DoubleToString(val
);
648 // Ensure that the number has a .0 if there's no decimal or 'e'. This
649 // makes sure that when we read the JSON back, it's interpreted as a
650 // real rather than an int.
651 if (real
.find('.') == std::string::npos
&&
652 real
.find('e') == std::string::npos
&&
653 real
.find('E') == std::string::npos
) {
656 // The JSON spec requires that non-integer values in the range (-1,1)
657 // have a zero before the decimal point - ".52" is not valid, "0.52" is.
658 if (real
[0] == '.') {
660 } else if (real
.length() > 1 && real
[0] == '-' && real
[1] == '.') {
661 // "-.1" bad "-0.1" good
664 } else if (IsNaN(val
)){
665 // The JSON spec doesn't allow NaN and Infinity (since these are
666 // objects in EcmaScript). Use strings instead.
668 } else if (val
< 0) {
669 real
= "\"-Infinity\"";
671 real
= "\"Infinity\"";
673 StringAppendF(out
, "%s", real
.c_str());
676 case TRACE_VALUE_TYPE_POINTER
:
677 // JSON only supports double and int numbers.
678 // So as not to lose bits from a 64-bit pointer, output as a hex string.
679 StringAppendF(out
, "\"0x%" PRIx64
"\"", static_cast<uint64
>(
680 reinterpret_cast<intptr_t>(
683 case TRACE_VALUE_TYPE_STRING
:
684 case TRACE_VALUE_TYPE_COPY_STRING
:
685 EscapeJSONString(value
.as_string
? value
.as_string
: "NULL", true, out
);
688 NOTREACHED() << "Don't know how to print this value";
693 void TraceEvent::AppendAsJSON(std::string
* out
) const {
694 int64 time_int64
= timestamp_
.ToInternalValue();
695 int process_id
= TraceLog::GetInstance()->process_id();
696 // Category group checked at category creation time.
697 DCHECK(!strchr(name_
, '"'));
699 "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
","
700 "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
701 TraceLog::GetCategoryGroupName(category_group_enabled_
),
708 // Output argument names and values, stop at first NULL argument name.
709 for (int i
= 0; i
< kTraceMaxNumArgs
&& arg_names_
[i
]; ++i
) {
713 *out
+= arg_names_
[i
];
716 if (arg_types_
[i
] == TRACE_VALUE_TYPE_CONVERTABLE
)
717 convertable_values_
[i
]->AppendAsTraceFormat(out
);
719 AppendValueAsJSON(arg_types_
[i
], arg_values_
[i
], out
);
723 if (phase_
== TRACE_EVENT_PHASE_COMPLETE
) {
724 int64 duration
= duration_
.ToInternalValue();
726 StringAppendF(out
, ",\"dur\":%" PRId64
, duration
);
727 if (!thread_timestamp_
.is_null()) {
728 int64 thread_duration
= thread_duration_
.ToInternalValue();
729 if (thread_duration
!= -1)
730 StringAppendF(out
, ",\"tdur\":%" PRId64
, thread_duration
);
734 // Output tts if thread_timestamp is valid.
735 if (!thread_timestamp_
.is_null()) {
736 int64 thread_time_int64
= thread_timestamp_
.ToInternalValue();
737 StringAppendF(out
, ",\"tts\":%" PRId64
, thread_time_int64
);
740 // If id_ is set, print it out as a hex string so we don't loose any
741 // bits (it might be a 64-bit pointer).
742 if (flags_
& TRACE_EVENT_FLAG_HAS_ID
)
743 StringAppendF(out
, ",\"id\":\"0x%" PRIx64
"\"", static_cast<uint64
>(id_
));
745 // Instant events also output their scope.
746 if (phase_
== TRACE_EVENT_PHASE_INSTANT
) {
748 switch (flags_
& TRACE_EVENT_FLAG_SCOPE_MASK
) {
749 case TRACE_EVENT_SCOPE_GLOBAL
:
750 scope
= TRACE_EVENT_SCOPE_NAME_GLOBAL
;
753 case TRACE_EVENT_SCOPE_PROCESS
:
754 scope
= TRACE_EVENT_SCOPE_NAME_PROCESS
;
757 case TRACE_EVENT_SCOPE_THREAD
:
758 scope
= TRACE_EVENT_SCOPE_NAME_THREAD
;
761 StringAppendF(out
, ",\"s\":\"%c\"", scope
);
767 void TraceEvent::AppendPrettyPrinted(std::ostringstream
* out
) const {
768 *out
<< name_
<< "[";
769 *out
<< TraceLog::GetCategoryGroupName(category_group_enabled_
);
773 for (int i
= 0; i
< kTraceMaxNumArgs
&& arg_names_
[i
]; ++i
) {
776 *out
<< arg_names_
[i
] << ":";
777 std::string value_as_text
;
779 if (arg_types_
[i
] == TRACE_VALUE_TYPE_CONVERTABLE
)
780 convertable_values_
[i
]->AppendAsTraceFormat(&value_as_text
);
782 AppendValueAsJSON(arg_types_
[i
], arg_values_
[i
], &value_as_text
);
784 *out
<< value_as_text
;
790 ////////////////////////////////////////////////////////////////////////////////
794 ////////////////////////////////////////////////////////////////////////////////
796 TraceResultBuffer::OutputCallback
797 TraceResultBuffer::SimpleOutput::GetCallback() {
798 return Bind(&SimpleOutput::Append
, Unretained(this));
801 void TraceResultBuffer::SimpleOutput::Append(
802 const std::string
& json_trace_output
) {
803 json_output
+= json_trace_output
;
806 TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
809 TraceResultBuffer::~TraceResultBuffer() {
812 void TraceResultBuffer::SetOutputCallback(
813 const OutputCallback
& json_chunk_callback
) {
814 output_callback_
= json_chunk_callback
;
817 void TraceResultBuffer::Start() {
818 append_comma_
= false;
819 output_callback_
.Run("[");
822 void TraceResultBuffer::AddFragment(const std::string
& trace_fragment
) {
824 output_callback_
.Run(",");
825 append_comma_
= true;
826 output_callback_
.Run(trace_fragment
);
829 void TraceResultBuffer::Finish() {
830 output_callback_
.Run("]");
833 ////////////////////////////////////////////////////////////////////////////////
835 // TraceSamplingThread
837 ////////////////////////////////////////////////////////////////////////////////
838 class TraceBucketData
;
839 typedef base::Callback
<void(TraceBucketData
*)> TraceSampleCallback
;
841 class TraceBucketData
{
843 TraceBucketData(base::subtle::AtomicWord
* bucket
,
845 TraceSampleCallback callback
);
848 TRACE_EVENT_API_ATOMIC_WORD
* bucket
;
849 const char* bucket_name
;
850 TraceSampleCallback callback
;
853 // This object must be created on the IO thread.
854 class TraceSamplingThread
: public PlatformThread::Delegate
{
856 TraceSamplingThread();
857 virtual ~TraceSamplingThread();
859 // Implementation of PlatformThread::Delegate:
860 virtual void ThreadMain() OVERRIDE
;
862 static void DefaultSamplingCallback(TraceBucketData
* bucekt_data
);
865 void WaitSamplingEventForTesting();
868 friend class TraceLog
;
871 // Not thread-safe. Once the ThreadMain has been called, this can no longer
873 void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD
* bucket
,
874 const char* const name
,
875 TraceSampleCallback callback
);
876 // Splits a combined "category\0name" into the two component parts.
877 static void ExtractCategoryAndName(const char* combined
,
878 const char** category
,
880 std::vector
<TraceBucketData
> sample_buckets_
;
881 bool thread_running_
;
882 CancellationFlag cancellation_flag_
;
883 WaitableEvent waitable_event_for_testing_
;
887 TraceSamplingThread::TraceSamplingThread()
888 : thread_running_(false),
889 waitable_event_for_testing_(false, false) {
892 TraceSamplingThread::~TraceSamplingThread() {
895 void TraceSamplingThread::ThreadMain() {
896 PlatformThread::SetName("Sampling Thread");
897 thread_running_
= true;
898 const int kSamplingFrequencyMicroseconds
= 1000;
899 while (!cancellation_flag_
.IsSet()) {
900 PlatformThread::Sleep(
901 TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds
));
903 waitable_event_for_testing_
.Signal();
908 void TraceSamplingThread::DefaultSamplingCallback(
909 TraceBucketData
* bucket_data
) {
910 TRACE_EVENT_API_ATOMIC_WORD category_and_name
=
911 TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data
->bucket
);
912 if (!category_and_name
)
914 const char* const combined
=
915 reinterpret_cast<const char* const>(category_and_name
);
916 const char* category_group
;
918 ExtractCategoryAndName(combined
, &category_group
, &name
);
919 TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE
,
920 TraceLog::GetCategoryGroupEnabled(category_group
),
921 name
, 0, 0, NULL
, NULL
, NULL
, NULL
, 0);
924 void TraceSamplingThread::GetSamples() {
925 for (size_t i
= 0; i
< sample_buckets_
.size(); ++i
) {
926 TraceBucketData
* bucket_data
= &sample_buckets_
[i
];
927 bucket_data
->callback
.Run(bucket_data
);
931 void TraceSamplingThread::RegisterSampleBucket(
932 TRACE_EVENT_API_ATOMIC_WORD
* bucket
,
933 const char* const name
,
934 TraceSampleCallback callback
) {
935 // Access to sample_buckets_ doesn't cause races with the sampling thread
936 // that uses the sample_buckets_, because it is guaranteed that
937 // RegisterSampleBucket is called before the sampling thread is created.
938 DCHECK(!thread_running_
);
939 sample_buckets_
.push_back(TraceBucketData(bucket
, name
, callback
));
943 void TraceSamplingThread::ExtractCategoryAndName(const char* combined
,
944 const char** category
,
946 *category
= combined
;
947 *name
= &combined
[strlen(combined
) + 1];
950 void TraceSamplingThread::Stop() {
951 cancellation_flag_
.Set();
954 void TraceSamplingThread::WaitSamplingEventForTesting() {
955 waitable_event_for_testing_
.Wait();
958 TraceBucketData::TraceBucketData(base::subtle::AtomicWord
* bucket
,
960 TraceSampleCallback callback
)
966 TraceBucketData::~TraceBucketData() {
969 ////////////////////////////////////////////////////////////////////////////////
973 ////////////////////////////////////////////////////////////////////////////////
975 class TraceLog::ThreadLocalEventBuffer
976 : public MessageLoop::DestructionObserver
{
978 ThreadLocalEventBuffer(TraceLog
* trace_log
);
979 virtual ~ThreadLocalEventBuffer();
981 TraceEvent
* AddTraceEvent(TraceEventHandle
* handle
);
983 void ReportOverhead(const TimeTicks
& event_timestamp
,
984 const TimeTicks
& event_thread_timestamp
);
986 TraceEvent
* GetEventByHandle(TraceEventHandle handle
) {
987 if (!chunk_
|| handle
.chunk_seq
!= chunk_
->seq() ||
988 handle
.chunk_index
!= chunk_index_
)
991 return chunk_
->GetEventAt(handle
.event_index
);
994 int generation() const { return generation_
; }
997 // MessageLoop::DestructionObserver
998 virtual void WillDestroyCurrentMessageLoop() OVERRIDE
;
1000 void FlushWhileLocked();
1002 void CheckThisIsCurrentBuffer() const {
1003 DCHECK(trace_log_
->thread_local_event_buffer_
.Get() == this);
1006 // Since TraceLog is a leaky singleton, trace_log_ will always be valid
1007 // as long as the thread exists.
1008 TraceLog
* trace_log_
;
1009 scoped_ptr
<TraceBufferChunk
> chunk_
;
1010 size_t chunk_index_
;
1012 TimeDelta overhead_
;
1015 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer
);
1018 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog
* trace_log
)
1019 : trace_log_(trace_log
),
1022 generation_(trace_log
->generation()) {
1023 // ThreadLocalEventBuffer is created only if the thread has a message loop, so
1024 // the following message_loop won't be NULL.
1025 MessageLoop
* message_loop
= MessageLoop::current();
1026 message_loop
->AddDestructionObserver(this);
1028 AutoLock
lock(trace_log
->lock_
);
1029 trace_log
->thread_message_loops_
.insert(message_loop
);
1032 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
1033 CheckThisIsCurrentBuffer();
1034 MessageLoop::current()->RemoveDestructionObserver(this);
1036 // Zero event_count_ happens in either of the following cases:
1037 // - no event generated for the thread;
1038 // - the thread has no message loop;
1039 // - trace_event_overhead is disabled.
1041 InitializeMetadataEvent(AddTraceEvent(NULL
),
1042 static_cast<int>(base::PlatformThread::CurrentId()),
1043 "overhead", "average_overhead",
1044 overhead_
.InMillisecondsF() / event_count_
);
1048 AutoLock
lock(trace_log_
->lock_
);
1050 trace_log_
->thread_message_loops_
.erase(MessageLoop::current());
1052 trace_log_
->thread_local_event_buffer_
.Set(NULL
);
1055 TraceEvent
* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
1056 TraceEventHandle
* handle
) {
1057 CheckThisIsCurrentBuffer();
1059 if (chunk_
&& chunk_
->IsFull()) {
1060 AutoLock
lock(trace_log_
->lock_
);
1065 AutoLock
lock(trace_log_
->lock_
);
1066 chunk_
= trace_log_
->logged_events_
->GetChunk(&chunk_index_
);
1067 trace_log_
->CheckIfBufferIsFullWhileLocked();
1073 TraceEvent
* trace_event
= chunk_
->AddTraceEvent(&event_index
);
1074 if (trace_event
&& handle
)
1075 MakeHandle(chunk_
->seq(), chunk_index_
, event_index
, handle
);
1080 void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
1081 const TimeTicks
& event_timestamp
,
1082 const TimeTicks
& event_thread_timestamp
) {
1083 if (!g_category_group_enabled
[g_category_trace_event_overhead
])
1086 CheckThisIsCurrentBuffer();
1089 TimeTicks thread_now
= ThreadNow();
1090 TimeTicks now
= trace_log_
->OffsetNow();
1091 TimeDelta overhead
= now
- event_timestamp
;
1092 if (overhead
.InMicroseconds() >= kOverheadReportThresholdInMicroseconds
) {
1093 TraceEvent
* trace_event
= AddTraceEvent(NULL
);
1095 trace_event
->Initialize(
1096 static_cast<int>(PlatformThread::CurrentId()),
1097 event_timestamp
, event_thread_timestamp
,
1098 TRACE_EVENT_PHASE_COMPLETE
,
1099 &g_category_group_enabled
[g_category_trace_event_overhead
],
1100 "overhead", 0, 0, NULL
, NULL
, NULL
, NULL
, 0);
1101 trace_event
->UpdateDuration(now
, thread_now
);
1104 overhead_
+= overhead
;
1107 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
1111 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
1115 trace_log_
->lock_
.AssertAcquired();
1116 if (trace_log_
->CheckGeneration(generation_
)) {
1117 // Return the chunk to the buffer only if the generation matches.
1118 trace_log_
->logged_events_
->ReturnChunk(chunk_index_
, chunk_
.Pass());
1120 // Otherwise this method may be called from the destructor, or TraceLog will
1121 // find the generation mismatch and delete this buffer soon.
1125 TraceLog
* TraceLog::GetInstance() {
1126 return Singleton
<TraceLog
, LeakySingletonTraits
<TraceLog
> >::get();
1129 TraceLog::TraceLog()
1131 num_traces_recorded_(0),
1133 dispatching_to_observer_list_(false),
1134 process_sort_index_(0),
1135 process_id_hash_(0),
1138 trace_options_(RECORD_UNTIL_FULL
),
1139 sampling_thread_handle_(0),
1140 category_filter_(CategoryFilter::kDefaultCategoryFilterString
),
1141 event_callback_category_filter_(
1142 CategoryFilter::kDefaultCategoryFilterString
),
1143 thread_shared_chunk_index_(0),
1145 // Trace is enabled or disabled on one thread while other threads are
1146 // accessing the enabled flag. We don't care whether edge-case events are
1147 // traced or not, so we allow races on the enabled flag to keep the trace
1149 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
1150 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
1151 // sizeof(g_category_group_enabled),
1152 // "trace_event category enabled");
1153 for (int i
= 0; i
< MAX_CATEGORY_GROUPS
; ++i
) {
1154 ANNOTATE_BENIGN_RACE(&g_category_group_enabled
[i
],
1155 "trace_event category enabled");
1157 #if defined(OS_NACL) // NaCl shouldn't expose the process id.
1160 SetProcessID(static_cast<int>(GetCurrentProcId()));
1162 // NaCl also shouldn't access the command line.
1163 if (CommandLine::InitializedForCurrentProcess() &&
1164 CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole
)) {
1165 std::string filter
= CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
1166 switches::kTraceToConsole
);
1167 if (filter
.empty()) {
1168 filter
= kEchoToConsoleCategoryFilter
;
1171 filter
.append(kEchoToConsoleCategoryFilter
);
1174 LOG(ERROR
) << "Start " << switches::kTraceToConsole
1175 << " with CategoryFilter '" << filter
<< "'.";
1176 SetEnabled(CategoryFilter(filter
), RECORDING_MODE
, ECHO_TO_CONSOLE
);
1180 logged_events_
.reset(CreateTraceBuffer());
1183 TraceLog::~TraceLog() {
1186 const unsigned char* TraceLog::GetCategoryGroupEnabled(
1187 const char* category_group
) {
1188 TraceLog
* tracelog
= GetInstance();
1190 DCHECK(!g_category_group_enabled
[g_category_already_shutdown
]);
1191 return &g_category_group_enabled
[g_category_already_shutdown
];
1193 return tracelog
->GetCategoryGroupEnabledInternal(category_group
);
1196 const char* TraceLog::GetCategoryGroupName(
1197 const unsigned char* category_group_enabled
) {
1198 // Calculate the index of the category group by finding
1199 // category_group_enabled in g_category_group_enabled array.
1200 uintptr_t category_begin
=
1201 reinterpret_cast<uintptr_t>(g_category_group_enabled
);
1202 uintptr_t category_ptr
= reinterpret_cast<uintptr_t>(category_group_enabled
);
1203 DCHECK(category_ptr
>= category_begin
&&
1204 category_ptr
< reinterpret_cast<uintptr_t>(
1205 g_category_group_enabled
+ MAX_CATEGORY_GROUPS
)) <<
1206 "out of bounds category pointer";
1207 uintptr_t category_index
=
1208 (category_ptr
- category_begin
) / sizeof(g_category_group_enabled
[0]);
1209 return g_category_groups
[category_index
];
1212 void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index
) {
1213 unsigned char enabled_flag
= 0;
1214 const char* category_group
= g_category_groups
[category_index
];
1215 if (mode_
== RECORDING_MODE
&&
1216 category_filter_
.IsCategoryGroupEnabled(category_group
))
1217 enabled_flag
|= ENABLED_FOR_RECORDING
;
1218 else if (mode_
== MONITORING_MODE
&&
1219 category_filter_
.IsCategoryGroupEnabled(category_group
))
1220 enabled_flag
|= ENABLED_FOR_MONITORING
;
1221 if (event_callback_
&&
1222 event_callback_category_filter_
.IsCategoryGroupEnabled(category_group
))
1223 enabled_flag
|= ENABLED_FOR_EVENT_CALLBACK
;
1224 g_category_group_enabled
[category_index
] = enabled_flag
;
1227 void TraceLog::UpdateCategoryGroupEnabledFlags() {
1228 int category_index
= base::subtle::NoBarrier_Load(&g_category_index
);
1229 for (int i
= 0; i
< category_index
; i
++)
1230 UpdateCategoryGroupEnabledFlag(i
);
1233 void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
1234 ResetTraceEventSyntheticDelays();
1235 const CategoryFilter::StringList
& delays
=
1236 category_filter_
.GetSyntheticDelayValues();
1237 CategoryFilter::StringList::const_iterator ci
;
1238 for (ci
= delays
.begin(); ci
!= delays
.end(); ++ci
) {
1239 StringTokenizer
tokens(*ci
, ";");
1240 if (!tokens
.GetNext())
1242 TraceEventSyntheticDelay
* delay
=
1243 TraceEventSyntheticDelay::Lookup(tokens
.token());
1244 while (tokens
.GetNext()) {
1245 std::string token
= tokens
.token();
1247 double target_duration
= strtod(token
.c_str(), &duration_end
);
1248 if (duration_end
!= token
.c_str()) {
1249 delay
->SetTargetDuration(
1250 TimeDelta::FromMicroseconds(target_duration
* 1e6
));
1251 } else if (token
== "static") {
1252 delay
->SetMode(TraceEventSyntheticDelay::STATIC
);
1253 } else if (token
== "oneshot") {
1254 delay
->SetMode(TraceEventSyntheticDelay::ONE_SHOT
);
1255 } else if (token
== "alternating") {
1256 delay
->SetMode(TraceEventSyntheticDelay::ALTERNATING
);
1262 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
1263 const char* category_group
) {
1264 DCHECK(!strchr(category_group
, '"')) <<
1265 "Category groups may not contain double quote";
1266 // The g_category_groups is append only, avoid using a lock for the fast path.
1267 int current_category_index
= base::subtle::Acquire_Load(&g_category_index
);
1269 // Search for pre-existing category group.
1270 for (int i
= 0; i
< current_category_index
; ++i
) {
1271 if (strcmp(g_category_groups
[i
], category_group
) == 0) {
1272 return &g_category_group_enabled
[i
];
1276 unsigned char* category_group_enabled
= NULL
;
1277 // This is the slow path: the lock is not held in the case above, so more
1278 // than one thread could have reached here trying to add the same category.
1279 // Only hold to lock when actually appending a new category, and
1280 // check the categories groups again.
1281 AutoLock
lock(lock_
);
1282 int category_index
= base::subtle::Acquire_Load(&g_category_index
);
1283 for (int i
= 0; i
< category_index
; ++i
) {
1284 if (strcmp(g_category_groups
[i
], category_group
) == 0) {
1285 return &g_category_group_enabled
[i
];
1289 // Create a new category group.
1290 DCHECK(category_index
< MAX_CATEGORY_GROUPS
) <<
1291 "must increase MAX_CATEGORY_GROUPS";
1292 if (category_index
< MAX_CATEGORY_GROUPS
) {
1293 // Don't hold on to the category_group pointer, so that we can create
1294 // category groups with strings not known at compile time (this is
1295 // required by SetWatchEvent).
1296 const char* new_group
= strdup(category_group
);
1297 ANNOTATE_LEAKING_OBJECT_PTR(new_group
);
1298 g_category_groups
[category_index
] = new_group
;
1299 DCHECK(!g_category_group_enabled
[category_index
]);
1300 // Note that if both included and excluded patterns in the
1301 // CategoryFilter are empty, we exclude nothing,
1302 // thereby enabling this category group.
1303 UpdateCategoryGroupEnabledFlag(category_index
);
1304 category_group_enabled
= &g_category_group_enabled
[category_index
];
1305 // Update the max index now.
1306 base::subtle::Release_Store(&g_category_index
, category_index
+ 1);
1308 category_group_enabled
=
1309 &g_category_group_enabled
[g_category_categories_exhausted
];
1311 return category_group_enabled
;
1314 void TraceLog::GetKnownCategoryGroups(
1315 std::vector
<std::string
>* category_groups
) {
1316 AutoLock
lock(lock_
);
1317 category_groups
->push_back(
1318 g_category_groups
[g_category_trace_event_overhead
]);
1319 int category_index
= base::subtle::NoBarrier_Load(&g_category_index
);
1320 for (int i
= g_num_builtin_categories
; i
< category_index
; i
++)
1321 category_groups
->push_back(g_category_groups
[i
]);
1324 void TraceLog::SetEnabled(const CategoryFilter
& category_filter
,
1327 std::vector
<EnabledStateObserver
*> observer_list
;
1329 AutoLock
lock(lock_
);
1331 // Can't enable tracing when Flush() is in progress.
1332 DCHECK(!flush_message_loop_proxy_
.get());
1334 Options old_options
= trace_options();
1337 if (options
!= old_options
) {
1338 DLOG(ERROR
) << "Attempting to re-enable tracing with a different "
1339 << "set of options.";
1342 if (mode
!= mode_
) {
1343 DLOG(ERROR
) << "Attempting to re-enable tracing with a different mode.";
1346 category_filter_
.Merge(category_filter
);
1347 UpdateCategoryGroupEnabledFlags();
1351 if (dispatching_to_observer_list_
) {
1353 "Cannot manipulate TraceLog::Enabled state from an observer.";
1359 if (options
!= old_options
) {
1360 subtle::NoBarrier_Store(&trace_options_
, options
);
1361 UseNextTraceBuffer();
1364 num_traces_recorded_
++;
1366 category_filter_
= CategoryFilter(category_filter
);
1367 UpdateCategoryGroupEnabledFlags();
1368 UpdateSyntheticDelaysFromCategoryFilter();
1370 if (options
& ENABLE_SAMPLING
) {
1371 sampling_thread_
.reset(new TraceSamplingThread
);
1372 sampling_thread_
->RegisterSampleBucket(
1375 Bind(&TraceSamplingThread::DefaultSamplingCallback
));
1376 sampling_thread_
->RegisterSampleBucket(
1379 Bind(&TraceSamplingThread::DefaultSamplingCallback
));
1380 sampling_thread_
->RegisterSampleBucket(
1383 Bind(&TraceSamplingThread::DefaultSamplingCallback
));
1384 if (!PlatformThread::Create(
1385 0, sampling_thread_
.get(), &sampling_thread_handle_
)) {
1386 DCHECK(false) << "failed to create thread";
1390 dispatching_to_observer_list_
= true;
1391 observer_list
= enabled_state_observer_list_
;
1393 // Notify observers outside the lock in case they trigger trace events.
1394 for (size_t i
= 0; i
< observer_list
.size(); ++i
)
1395 observer_list
[i
]->OnTraceLogEnabled();
1398 AutoLock
lock(lock_
);
1399 dispatching_to_observer_list_
= false;
1403 CategoryFilter
TraceLog::GetCurrentCategoryFilter() {
1404 AutoLock
lock(lock_
);
1405 return category_filter_
;
1408 void TraceLog::SetDisabled() {
1409 AutoLock
lock(lock_
);
1410 SetDisabledWhileLocked();
1413 void TraceLog::SetDisabledWhileLocked() {
1414 lock_
.AssertAcquired();
1419 if (dispatching_to_observer_list_
) {
1421 << "Cannot manipulate TraceLog::Enabled state from an observer.";
1427 if (sampling_thread_
.get()) {
1428 // Stop the sampling thread.
1429 sampling_thread_
->Stop();
1431 PlatformThread::Join(sampling_thread_handle_
);
1433 sampling_thread_handle_
= PlatformThreadHandle();
1434 sampling_thread_
.reset();
1437 category_filter_
.Clear();
1438 subtle::NoBarrier_Store(&watch_category_
, 0);
1439 watch_event_name_
= "";
1440 UpdateCategoryGroupEnabledFlags();
1441 AddMetadataEventsWhileLocked();
1443 dispatching_to_observer_list_
= true;
1444 std::vector
<EnabledStateObserver
*> observer_list
=
1445 enabled_state_observer_list_
;
1448 // Dispatch to observers outside the lock in case the observer triggers a
1450 AutoUnlock
unlock(lock_
);
1451 for (size_t i
= 0; i
< observer_list
.size(); ++i
)
1452 observer_list
[i
]->OnTraceLogDisabled();
1454 dispatching_to_observer_list_
= false;
1457 int TraceLog::GetNumTracesRecorded() {
1458 AutoLock
lock(lock_
);
1461 return num_traces_recorded_
;
1464 void TraceLog::AddEnabledStateObserver(EnabledStateObserver
* listener
) {
1465 enabled_state_observer_list_
.push_back(listener
);
1468 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver
* listener
) {
1469 std::vector
<EnabledStateObserver
*>::iterator it
=
1470 std::find(enabled_state_observer_list_
.begin(),
1471 enabled_state_observer_list_
.end(),
1473 if (it
!= enabled_state_observer_list_
.end())
1474 enabled_state_observer_list_
.erase(it
);
1477 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver
* listener
) const {
1478 std::vector
<EnabledStateObserver
*>::const_iterator it
=
1479 std::find(enabled_state_observer_list_
.begin(),
1480 enabled_state_observer_list_
.end(),
1482 return it
!= enabled_state_observer_list_
.end();
1485 float TraceLog::GetBufferPercentFull() const {
1486 AutoLock
lock(lock_
);
1487 return static_cast<float>(static_cast<double>(logged_events_
->Size()) /
1488 logged_events_
->Capacity());
1491 bool TraceLog::BufferIsFull() const {
1492 AutoLock
lock(lock_
);
1493 return logged_events_
->IsFull();
1496 TraceBuffer
* TraceLog::CreateTraceBuffer() {
1497 Options options
= trace_options();
1498 if (options
& RECORD_CONTINUOUSLY
)
1499 return new TraceBufferRingBuffer(kTraceEventRingBufferChunks
);
1500 else if ((options
& ENABLE_SAMPLING
) && mode_
== MONITORING_MODE
)
1501 return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks
);
1502 else if (options
& ECHO_TO_CONSOLE
)
1503 return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks
);
1504 return new TraceBufferVector();
1507 TraceEvent
* TraceLog::AddEventToThreadSharedChunkWhileLocked(
1508 TraceEventHandle
* handle
, bool check_buffer_is_full
) {
1509 lock_
.AssertAcquired();
1511 if (thread_shared_chunk_
&& thread_shared_chunk_
->IsFull()) {
1512 logged_events_
->ReturnChunk(thread_shared_chunk_index_
,
1513 thread_shared_chunk_
.Pass());
1516 if (!thread_shared_chunk_
) {
1517 thread_shared_chunk_
= logged_events_
->GetChunk(
1518 &thread_shared_chunk_index_
);
1519 if (check_buffer_is_full
)
1520 CheckIfBufferIsFullWhileLocked();
1522 if (!thread_shared_chunk_
)
1526 TraceEvent
* trace_event
= thread_shared_chunk_
->AddTraceEvent(&event_index
);
1527 if (trace_event
&& handle
) {
1528 MakeHandle(thread_shared_chunk_
->seq(), thread_shared_chunk_index_
,
1529 event_index
, handle
);
1534 void TraceLog::CheckIfBufferIsFullWhileLocked() {
1535 lock_
.AssertAcquired();
1536 if (logged_events_
->IsFull())
1537 SetDisabledWhileLocked();
1540 void TraceLog::SetEventCallbackEnabled(const CategoryFilter
& category_filter
,
1542 AutoLock
lock(lock_
);
1543 subtle::NoBarrier_Store(&event_callback_
,
1544 reinterpret_cast<subtle::AtomicWord
>(cb
));
1545 event_callback_category_filter_
= category_filter
;
1546 UpdateCategoryGroupEnabledFlags();
1549 void TraceLog::SetEventCallbackDisabled() {
1550 AutoLock
lock(lock_
);
1551 subtle::NoBarrier_Store(&event_callback_
, 0);
1552 UpdateCategoryGroupEnabledFlags();
1555 // Flush() works as the following:
1556 // 1. Flush() is called in threadA whose message loop is saved in
1557 // flush_message_loop_proxy_;
1558 // 2. If thread_message_loops_ is not empty, threadA posts task to each message
1559 // loop to flush the thread local buffers; otherwise finish the flush;
1560 // 3. FlushCurrentThread() deletes the thread local event buffer:
1561 // - The last batch of events of the thread are flushed into the main buffer;
1562 // - The message loop will be removed from thread_message_loops_;
1563 // If this is the last message loop, finish the flush;
1564 // 4. If any thread hasn't finish its flush in time, finish the flush.
1565 void TraceLog::Flush(const TraceLog::OutputCallback
& cb
) {
1567 // Can't flush when tracing is enabled because otherwise PostTask would
1568 // - generate more trace events;
1569 // - deschedule the calling thread on some platforms causing inaccurate
1570 // timing of the trace events.
1571 scoped_refptr
<RefCountedString
> empty_result
= new RefCountedString
;
1573 cb
.Run(empty_result
, false);
1574 LOG(WARNING
) << "Ignored TraceLog::Flush called when tracing is enabled";
1578 int generation
= this->generation();
1580 AutoLock
lock(lock_
);
1581 DCHECK(!flush_message_loop_proxy_
.get());
1582 flush_message_loop_proxy_
= MessageLoopProxy::current();
1583 DCHECK(!thread_message_loops_
.size() || flush_message_loop_proxy_
.get());
1584 flush_output_callback_
= cb
;
1586 if (thread_shared_chunk_
) {
1587 logged_events_
->ReturnChunk(thread_shared_chunk_index_
,
1588 thread_shared_chunk_
.Pass());
1591 if (thread_message_loops_
.size()) {
1592 for (hash_set
<MessageLoop
*>::const_iterator it
=
1593 thread_message_loops_
.begin();
1594 it
!= thread_message_loops_
.end(); ++it
) {
1597 Bind(&TraceLog::FlushCurrentThread
, Unretained(this), generation
));
1599 flush_message_loop_proxy_
->PostDelayedTask(
1601 Bind(&TraceLog::OnFlushTimeout
, Unretained(this), generation
),
1602 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs
));
1607 FinishFlush(generation
);
1610 void TraceLog::ConvertTraceEventsToTraceFormat(
1611 scoped_ptr
<TraceBuffer
> logged_events
,
1612 const TraceLog::OutputCallback
& flush_output_callback
) {
1614 if (flush_output_callback
.is_null())
1617 // The callback need to be called at least once even if there is no events
1618 // to let the caller know the completion of flush.
1619 bool has_more_events
= true;
1621 scoped_refptr
<RefCountedString
> json_events_str_ptr
=
1622 new RefCountedString();
1624 for (size_t i
= 0; i
< kTraceEventBatchChunks
; ++i
) {
1625 const TraceBufferChunk
* chunk
= logged_events
->NextChunk();
1627 has_more_events
= false;
1630 for (size_t j
= 0; j
< chunk
->size(); ++j
) {
1632 json_events_str_ptr
->data().append(",");
1633 chunk
->GetEventAt(j
)->AppendAsJSON(&(json_events_str_ptr
->data()));
1637 flush_output_callback
.Run(json_events_str_ptr
, has_more_events
);
1638 } while (has_more_events
);
1641 void TraceLog::FinishFlush(int generation
) {
1642 scoped_ptr
<TraceBuffer
> previous_logged_events
;
1643 OutputCallback flush_output_callback
;
1645 if (!CheckGeneration(generation
))
1649 AutoLock
lock(lock_
);
1651 previous_logged_events
.swap(logged_events_
);
1652 UseNextTraceBuffer();
1653 thread_message_loops_
.clear();
1655 flush_message_loop_proxy_
= NULL
;
1656 flush_output_callback
= flush_output_callback_
;
1657 flush_output_callback_
.Reset();
1660 ConvertTraceEventsToTraceFormat(previous_logged_events
.Pass(),
1661 flush_output_callback
);
1664 // Run in each thread holding a local event buffer.
1665 void TraceLog::FlushCurrentThread(int generation
) {
1667 AutoLock
lock(lock_
);
1668 if (!CheckGeneration(generation
) || !flush_message_loop_proxy_
) {
1669 // This is late. The corresponding flush has finished.
1674 // This will flush the thread local buffer.
1675 delete thread_local_event_buffer_
.Get();
1677 AutoLock
lock(lock_
);
1678 if (!CheckGeneration(generation
) || !flush_message_loop_proxy_
||
1679 thread_message_loops_
.size())
1682 flush_message_loop_proxy_
->PostTask(
1684 Bind(&TraceLog::FinishFlush
, Unretained(this), generation
));
1687 void TraceLog::OnFlushTimeout(int generation
) {
1689 AutoLock
lock(lock_
);
1690 if (!CheckGeneration(generation
) || !flush_message_loop_proxy_
) {
1691 // Flush has finished before timeout.
1696 "The following threads haven't finished flush in time. "
1697 "If this happens stably for some thread, please call "
1698 "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
1699 "the thread to avoid its trace events from being lost.";
1700 for (hash_set
<MessageLoop
*>::const_iterator it
=
1701 thread_message_loops_
.begin();
1702 it
!= thread_message_loops_
.end(); ++it
) {
1703 LOG(WARNING
) << "Thread: " << (*it
)->thread_name();
1706 FinishFlush(generation
);
1709 void TraceLog::FlushButLeaveBufferIntact(
1710 const TraceLog::OutputCallback
& flush_output_callback
) {
1711 scoped_ptr
<TraceBuffer
> previous_logged_events
;
1713 AutoLock
lock(lock_
);
1714 AddMetadataEventsWhileLocked();
1715 if (thread_shared_chunk_
) {
1716 // Return the chunk to the main buffer to flush the sampling data.
1717 logged_events_
->ReturnChunk(thread_shared_chunk_index_
,
1718 thread_shared_chunk_
.Pass());
1720 previous_logged_events
= logged_events_
->CloneForIteration().Pass();
1723 ConvertTraceEventsToTraceFormat(previous_logged_events
.Pass(),
1724 flush_output_callback
);
1727 void TraceLog::UseNextTraceBuffer() {
1728 logged_events_
.reset(CreateTraceBuffer());
1729 subtle::NoBarrier_AtomicIncrement(&generation_
, 1);
1730 thread_shared_chunk_
.reset();
1731 thread_shared_chunk_index_
= 0;
1734 TraceEventHandle
TraceLog::AddTraceEvent(
1736 const unsigned char* category_group_enabled
,
1738 unsigned long long id
,
1740 const char** arg_names
,
1741 const unsigned char* arg_types
,
1742 const unsigned long long* arg_values
,
1743 const scoped_refptr
<ConvertableToTraceFormat
>* convertable_values
,
1744 unsigned char flags
) {
1745 int thread_id
= static_cast<int>(base::PlatformThread::CurrentId());
1746 base::TimeTicks now
= base::TimeTicks::NowFromSystemTraceTime();
1747 return AddTraceEventWithThreadIdAndTimestamp(phase
, category_group_enabled
,
1748 name
, id
, thread_id
, now
,
1749 num_args
, arg_names
,
1750 arg_types
, arg_values
,
1751 convertable_values
, flags
);
1754 TraceEventHandle
TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1756 const unsigned char* category_group_enabled
,
1758 unsigned long long id
,
1760 const TimeTicks
& timestamp
,
1762 const char** arg_names
,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(name);

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

  TimeTicks now = OffsetTimestamp(timestamp);
  TimeTicks thread_now = ThreadNow();

  ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or whose message loop may be blocked,
  // the trace events will be added into the main buffer directly.
  if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
    thread_local_event_buffer = thread_local_event_buffer_.Get();
    if (thread_local_event_buffer &&
        !CheckGeneration(thread_local_event_buffer->generation())) {
      delete thread_local_event_buffer;
      thread_local_event_buffer = NULL;
    }
    if (!thread_local_event_buffer) {
      thread_local_event_buffer = new ThreadLocalEventBuffer(this);
      thread_local_event_buffer_.Set(thread_local_event_buffer);
    }
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name = ThreadIdNameManager::GetInstance()->
        GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id, now, thread_now, phase,
                              category_group_enabled, name, id,
                              num_args, arg_names, arg_types, arg_values,
                              convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & ECHO_TO_CONSOLE) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
      &watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();
    }
  }

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now,
                     phase == TRACE_EVENT_PHASE_COMPLETE ?
                         TRACE_EVENT_PHASE_BEGIN : phase,
                     category_group_enabled, name, id,
                     num_args, arg_names, arg_types, arg_values,
                     flags);
    }
  }

  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);

  return handle;
}
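
// Illustrative note (not part of the original source): when a thread id is
// reused with a different name, the branch above appends the new name to the
// comma-separated list kept in thread_names_. For example (hypothetical
// names), a thread id first seen as "WorkerPool/1" and later renamed
// "CompositorTileWorker" would end up recorded as
// "WorkerPool/1,CompositorTileWorker".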
// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id = trace_event ?
      trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm",
                            thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
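
// Illustrative note (not part of the original source): with ECHO_TO_CONSOLE
// enabled, each message built above starts with the thread name, switches to
// one of six ANSI colors chosen per thread, indents with "| " once per
// unfinished event on that thread, and appends the pretty-printed event; END
// phases additionally append the elapsed time, e.g. " (1.234 ms)".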
#if defined(OS_WIN)
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                unsigned long long id,
                                const char* extra) {
  if (name)
    TraceEventETWProvider::Trace(name, phase, id, extra);

  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                unsigned long long id,
                                const std::string& extra) {
  if (name)
    TraceEventETWProvider::Trace(name, phase, id, extra);

  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}
#endif  // OS_WIN
void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  TimeTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & ECHO_TO_CONSOLE) {
      console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
                                              now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
                     TRACE_EVENT_FLAG_NONE);
    }
  }
}
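
// Illustrative note (not part of the original source): a COMPLETE event is
// recorded in two steps. AddTraceEventWithThreadIdAndTimestamp() writes the
// event with phase TRACE_EVENT_PHASE_COMPLETE and returns a handle; when the
// traced scope ends, UpdateTraceEventDuration() looks the event up again by
// that handle and fills in its duration. ScopedTraceBinaryEfficient at the
// bottom of this file is one concrete user of this begin/end pairing.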
void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}
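
// Illustrative usage sketch (hypothetical caller, not part of the original
// source): a test that wants to be notified whenever a specific event is
// recorded could do something like
//   TraceLog::GetInstance()->SetWatchEvent("cat", "event_name",
//                                          base::Bind(&OnWatchEventFired));
//   ...
//   TraceLog::GetInstance()->CancelWatchEvent();
// where OnWatchEventFired is a hypothetical callback compatible with
// WatchEventCallback.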
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_sort_index", "sort_index",
                            process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for(base::hash_map<int, std::string>::iterator it = process_labels_.begin();
        it != process_labels_.end();
        it++) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_labels", "labels",
                            JoinString(labels, ','));
  }

  // Thread sort indices.
  for(hash_map<int, int>::iterator it = thread_sort_indices_.begin();
      it != thread_sort_indices_.end();
      it++) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for(hash_map<int, std::string>::iterator it = thread_names_.begin();
      it != thread_names_.end();
      it++) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_name", "name",
                            it->second);
  }
}
void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}
TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer under the control of the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq() ?
        thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}
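
// Illustrative note (not part of the original source): the hash above is one
// round of an FNV-1a style mix (xor, then multiply). For a hypothetical
// process_id of 1234 it evaluates, modulo 2^64, to
//   process_id_hash_ = (14695981039346656037ull ^ 1234ull) * 1099511628211ull;
// Events carrying TRACE_EVENT_FLAG_MANGLE_ID XOR their id with this value in
// AddTraceEventWithThreadIdAndTimestamp(), so ids coming from different
// processes are unlikely to collide.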
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if(!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it = process_labels_.find(
      label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}
bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
    const std::string& str) {
  return str.empty() ||
      str.at(0) == ' ' ||
      str.at(str.length() - 1) == ' ';
}
bool CategoryFilter::DoesCategoryGroupContainCategory(
    const char* category_group,
    const char* category) const {
  DCHECK(category);
  CStringTokenizer category_group_tokens(category_group,
                          category_group + strlen(category_group), ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
        category_group_token))
        << "Disallowed category string";
    if (MatchPattern(category_group_token.c_str(), category))
      return true;
  }
  return false;
}
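
// Illustrative note (not part of the original source): a category group is a
// comma-separated list of categories, and the filter entry may contain
// wildcards understood by MatchPattern(). For example, the group
// "cc,disabled-by-default-cc.debug" contains the category "cc", and it would
// also match a hypothetical filter entry such as "disabled-by-default-cc.*".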
CategoryFilter::CategoryFilter(const std::string& filter_string) {
  if (!filter_string.empty())
    Initialize(filter_string);
  else
    Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_),
      delays_(cf.delays_) {
}

CategoryFilter::~CategoryFilter() {
}

CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  delays_ = rhs.delays_;
  return *this;
}
void CategoryFilter::Initialize(const std::string& filter_string) {
  // Tokenize list of categories, delimited by ','.
  StringTokenizer tokens(filter_string, ",");
  // Add each token to the appropriate list (included_, excluded_).
  while (tokens.GetNext()) {
    std::string category = tokens.token();
    // Ignore empty categories.
    if (category.empty())
      continue;
    // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
    if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
        category.at(category.size() - 1) == ')') {
      category = category.substr(
          strlen(kSyntheticDelayCategoryFilterPrefix),
          category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
      size_t name_length = category.find(';');
      if (name_length != std::string::npos && name_length > 0 &&
          name_length != category.size() - 1) {
        delays_.push_back(category);
      }
    } else if (category.at(0) == '-') {
      // Excluded categories start with '-'.
      // Remove '-' from category string.
      category = category.substr(1);
      excluded_.push_back(category);
    } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                TRACE_DISABLED_BY_DEFAULT("")) == 0) {
      disabled_.push_back(category);
    } else {
      included_.push_back(category);
    }
  }
}
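
// Illustrative note (not part of the original source): given a hypothetical
// filter string such as
//   "cc,-ipc,disabled-by-default-gpu.debug,DELAY(gpu.SwapBuffers;16)"
// the loop above places "cc" in included_, "ipc" (with the leading '-'
// stripped) in excluded_, "disabled-by-default-gpu.debug" in disabled_, and
// "gpu.SwapBuffers;16" in delays_.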
void CategoryFilter::WriteString(const StringList& values,
                                 std::string* out,
                                 bool included) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = values.begin();
       ci != values.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
    ++token_cnt;
  }
}

void CategoryFilter::WriteString(const StringList& delays,
                                 std::string* out) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = delays.begin();
       ci != delays.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
                  ci->c_str());
    ++token_cnt;
  }
}
std::string CategoryFilter::ToString() const {
  std::string filter_string;
  WriteString(included_, &filter_string, true);
  WriteString(disabled_, &filter_string, true);
  WriteString(excluded_, &filter_string, false);
  WriteString(delays_, &filter_string);
  return filter_string;
}
bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as part of enabling/disabling
  // categories.
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include the disabled.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }
  if (DoesCategoryGroupContainCategory(category_group_name,
                                       TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }

  for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return false;
  }
  // If the category group is not excluded, and there are no included patterns,
  // we consider this category group enabled.
  return included_.empty();
}
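
// Illustrative note (not part of the original source): the checks above run
// in order: explicit disabled- entries, the disabled-by-default-* wildcard,
// included entries, then excluded entries. With a hypothetical filter
// "-ipc,disabled-by-default-memory", the group "disabled-by-default-memory"
// is enabled, "ipc" is disabled, and an unrelated group such as "cc" is
// enabled because included_ is empty.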
bool CategoryFilter::HasIncludedPatterns() const {
  return !included_.empty();
}

void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
  // Keep included patterns only if both filters have an included entry.
  // Otherwise, one of the filters was specifying "*" and we want to honour
  // the broadest filter.
  if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
    included_.insert(included_.end(),
                     nested_filter.included_.begin(),
                     nested_filter.included_.end());
  } else {
    included_.clear();
  }

  disabled_.insert(disabled_.end(),
                   nested_filter.disabled_.begin(),
                   nested_filter.disabled_.end());
  excluded_.insert(excluded_.end(),
                   nested_filter.excluded_.begin(),
                   nested_filter.excluded_.end());
  delays_.insert(delays_.end(),
                 nested_filter.delays_.begin(),
                 nested_filter.delays_.end());
}

void CategoryFilter::Clear() {
  included_.clear();
  disabled_.clear();
  excluded_.clear();
}

const CategoryFilter::StringList&
    CategoryFilter::GetSyntheticDelayValues() const {
  return delays_;
}
}  // namespace debug
}  // namespace base

namespace trace_event_internal {
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group, const char* name) {
  // The single atom works because for now the category_group can only be "gpu".
  DCHECK(strcmp(category_group, "gpu") == 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kNoEventId,
            static_cast<int>(base::PlatformThread::CurrentId()),
            base::TimeTicks::NowFromSystemTraceTime(),
            0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
  }
}
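
// Illustrative note (not part of the original source): this class is intended
// to live on the stack, so that the constructor above records the COMPLETE
// event and the destructor below fills in its duration, e.g. (hypothetical
// call site)
//   trace_event_internal::ScopedTraceBinaryEfficient trace("gpu", "SwapBuffers");
// In practice it is reached through the corresponding trace_event.h macro
// rather than constructed directly.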
ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}

}  // namespace trace_event_internal