// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_event_impl.h"

#include <algorithm>
#include <cmath>

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/memory/singleton.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"

#if defined(OS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#include "base/trace_event/trace_event_win.h"
#endif
class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::trace_event::TraceLog,
              LeakySingletonTraits<base::trace_event::TraceLog>>::OnExit(0);
  }
};
// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
namespace base {
namespace trace_event {

namespace {
// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const int kThreadFlushTimeoutMs = 3000;

// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
#define MAX_CATEGORY_GROUPS 100

// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
    "toplevel",
    "tracing already shutdown",
    "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
    "__metadata",
    // For reporting trace_event overhead. For thread local event buffers only.
    "trace_event_overhead"};

// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
// Skip default categories.
base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
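
// Because the two arrays are parallel, an enabled-flag pointer maps back to
// its category name with simple pointer arithmetic (see GetCategoryGroupName()
// below), e.g.:
//   size_t index = category_group_enabled - g_category_group_enabled;
//   const char* name = g_category_groups[index];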
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
ThreadTicks ThreadNow() {
  return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
}
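
// TraceBufferRingBuffer recycles a fixed pool of chunks. Free chunk indices
// live in |recyclable_chunks_queue_|, a circular queue with one spare slot so
// that head == tail means empty and queue_capacity() - 1 entries means full.
// GetChunk() pops an index at the head and marks that slot in-flight (NULL);
// ReturnChunk() pushes the index back at the tail, so the oldest returned
// chunks are the next ones to be reused and overwritten.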
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }

  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.Pass();
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add("TraceBufferRingBuffer", sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    const TraceBufferChunk* NextChunk() override {
      return current_iteration_index_ < chunks_.size() ?
          chunks_[current_iteration_index_++] : NULL;
    }

    scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
      return scoped_ptr<TraceBufferChunk>();
    }
    void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
    }
    bool IsFull() const override { return false; }
    size_t Size() const override { return 0; }
    size_t Capacity() const override { return 0; }
    TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
      return NULL;
    }
    scoped_ptr<TraceBuffer> CloneForIteration() const override {
      return scoped_ptr<TraceBuffer>();
    }
    void EstimateTraceMemoryOverhead(
        TraceEventMemoryOverhead* overhead) override {
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };

  bool QueueIsEmpty() const {
    return queue_head_ == queue_tail_;
  }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
        queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const {
    return QueueSize() == queue_capacity() - 1;
  }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }

  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }

  scoped_ptr<TraceBuffer> CloneForIteration() const override {
    return scoped_ptr<TraceBuffer>();
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i];
      // Skip the in-flight (nullptr) chunks. They will be accounted by the
      // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};
template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name, const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(
      thread_id, TraceTicks(), ThreadTicks(), TRACE_EVENT_PHASE_METADATA,
      &g_category_group_enabled[g_category_metadata], metadata_name,
      trace_event_internal::kNoId, trace_event_internal::kNoId, num_args,
      &arg_name, &arg_type, &arg_value, NULL, TRACE_EVENT_FLAG_NONE);
}
class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() {
    thread_local_boolean_->Set(false);
  }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};

}  // namespace
TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {
}

TraceBufferChunk::~TraceBufferChunk() {
}

void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}

void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add("TraceBufferChunk",
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount("TraceEvent");
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunks_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add("TraceEvent (unused)",
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}
// A helper class that allows the lock to be acquired in the middle of the scope
// and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}

  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    }
  }

 private:
  Lock* lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};
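
// Illustrative use: construct the OptionalAutoLock up front and call
// EnsureAcquired() only on the paths that actually touch shared state; the
// destructor releases the lock only if it was acquired.
//   TraceLog::OptionalAutoLock lock(&lock_);
//   if (needs_shared_state)  // hypothetical condition
//     lock.EnsureAcquired();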
// Use this function instead of TraceEventHandle constructor to keep the
// overhead of ScopedTracer (trace_event.h) constructor minimum.
void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
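
// A TraceEventHandle is the (chunk_seq, chunk_index, event_index) triple with
// the indices narrowed to 16 bits. For example, MakeHandle(7, 2, 5, &handle)
// yields handle.chunk_seq == 7, handle.chunk_index == 2 and
// handle.event_index == 5, which GetEventByHandle() resolves back to
// chunks_[2]->GetEventAt(5) as long as that chunk still has sequence number 7.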
////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

namespace {

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }

// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}

}  // namespace
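
// These helpers let TraceEvent::Initialize() pack all copied strings (the
// event name, argument names and COPY_STRING argument values) into a single
// RefCountedString: one pass sums the lengths via GetAllocLength(), then
// CopyTraceEventParameter() copies each string into the block and repoints
// the corresponding member at its new location.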
TraceEvent::TraceEvent()
    : duration_(TimeDelta::FromInternalValue(-1)),
      id_(0u),
      category_group_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    arg_names_[i] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));
}
TraceEvent::~TraceEvent() {
}

void TraceEvent::CopyFrom(const TraceEvent& other) {
  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  duration_ = other.duration_;
  id_ = other.id_;
  context_id_ = other.context_id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];
    arg_values_[i] = other.arg_values_[i];
    convertable_values_[i] = other.convertable_values_[i];
  }
}
void TraceEvent::Initialize(
    int thread_id,
    TraceTicks timestamp,
    ThreadTicks thread_timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    unsigned long long context_id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  timestamp_ = timestamp;
  thread_timestamp_ = thread_timestamp;
  duration_ = TimeDelta::FromInternalValue(-1);
  id_ = id;
  context_id_ = context_id;
  category_group_enabled_ = category_group_enabled;
  name_ = name;
  thread_id_ = thread_id;
  phase_ = phase;
  flags_ = flags;

  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i] = convertable_values[i];
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i] = NULL;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }

  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (arg_is_copy[i])
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}
void TraceEvent::Reset() {
  // Only reset fields that won't be initialized in Initialize(), or that may
  // hold references to other objects.
  duration_ = TimeDelta::FromInternalValue(-1);
  parameter_copy_storage_ = NULL;
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    convertable_values_[i] = NULL;
}

void TraceEvent::UpdateDuration(const TraceTicks& now,
                                const ThreadTicks& thread_now) {
  DCHECK_EQ(duration_.ToInternalValue(), -1);
  duration_ = now - timestamp_;
  thread_duration_ = thread_now - thread_timestamp_;
}

void TraceEvent::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add("TraceEvent", sizeof(*this));

  // TODO(primiano): parameter_copy_storage_ is refcounted and, in theory,
  // could be shared by several events and we might overcount. In practice
  // this is unlikely but it's worth checking.
  if (parameter_copy_storage_)
    overhead->AddRefCountedString(*parameter_copy_storage_.get());

  for (size_t i = 0; i < kTraceMaxNumArgs; ++i) {
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->EstimateTraceMemoryOverhead(overhead);
  }
}
void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
                                   std::string* out) {
  switch (type) {
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
      break;
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
      break;
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
      break;
    case TRACE_VALUE_TYPE_DOUBLE: {
      // FIXME: base/json/json_writer.cc is using the same code,
      //        should be made into a common method.
      std::string real;
      double val = value.as_double;
      if (std::isfinite(val)) {
        real = DoubleToString(val);
        // Ensure that the number has a .0 if there's no decimal or 'e'. This
        // makes sure that when we read the JSON back, it's interpreted as a
        // real rather than an int.
        if (real.find('.') == std::string::npos &&
            real.find('e') == std::string::npos &&
            real.find('E') == std::string::npos) {
          real.append(".0");
        }
        // The JSON spec requires that non-integer values in the range (-1,1)
        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
        if (real[0] == '.') {
          real.insert(0, "0");
        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
          // "-.1" bad "-0.1" good
          real.insert(1, "0");
        }
      } else if (std::isnan(val)) {
        // The JSON spec doesn't allow NaN and Infinity (since these are
        // objects in EcmaScript). Use strings instead.
        real = "\"NaN\"";
      } else if (val < 0) {
        real = "\"-Infinity\"";
      } else {
        real = "\"Infinity\"";
      }
      StringAppendF(out, "%s", real.c_str());
      break;
    }
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                                     reinterpret_cast<intptr_t>(
                                     value.as_pointer)));
      break;
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
      break;
    default:
      NOTREACHED() << "Don't know how to print this value";
      break;
  }
}
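
// Examples of the double formatting above: a value that converts without a
// decimal point or exponent gets ".0" appended (3 -> "3.0") so JSON readers
// keep it as a real; a leading "." or "-." gets a zero inserted (".52" ->
// "0.52", "-.1" -> "-0.1"); NaN and infinities are emitted as the strings
// "NaN", "Infinity" and "-Infinity".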
void TraceEvent::AppendAsJSON(
    std::string* out,
    const ArgumentFilterPredicate& argument_filter_predicate) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  const char* category_group_name =
      TraceLog::GetCategoryGroupName(category_group_enabled_);

  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
                     ","
                     "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":",
                process_id, thread_id_, time_int64, phase_, category_group_name,
                name_);

  // Output argument names and values, stop at first NULL argument name.
  bool strip_args = arg_names_[0] && !argument_filter_predicate.is_null() &&
                    !argument_filter_predicate.Run(category_group_name, name_);

  if (strip_args) {
    *out += "\"__stripped__\"";
  } else {
    *out += "{";

    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out += ",";
      *out += "\"";
      *out += arg_names_[i];
      *out += "\":";

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(out);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
    }

    *out += "}";
  }

  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    int64 duration = duration_.ToInternalValue();
    if (duration != -1)
      StringAppendF(out, ",\"dur\":%" PRId64, duration);
    if (!thread_timestamp_.is_null()) {
      int64 thread_duration = thread_duration_.ToInternalValue();
      if (thread_duration != -1)
        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
    }
  }

  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
  }

  // Output async tts marker field if flag is set.
  if (flags_ & TRACE_EVENT_FLAG_ASYNC_TTS) {
    StringAppendF(out, ", \"use_async_tts\":1");
  }

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
    StringAppendF(out, ",\"bp\":\"e\"");

  // Similar to id_, print the context_id as hex if present.
  if (flags_ & TRACE_EVENT_FLAG_HAS_CONTEXT_ID)
    StringAppendF(out, ",\"cid\":\"0x%" PRIx64 "\"",
                  static_cast<uint64>(context_id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;

      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;

      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}
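
// The resulting JSON for one event looks like (illustrative values):
//   {"pid":1234,"tid":5678,"ts":1000,"ph":"X","cat":"cc","name":"Frame",
//    "args":{"width":640},"dur":200,"tts":150}
// The optional fields ("dur", "tdur", "tts", "id", "bp", "cid", "s") are only
// emitted when the corresponding phase, flag or timestamp is present.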
void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  *out << "]";
  if (arg_names_[0]) {
    *out << ", {";
    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out << ", ";
      *out << arg_names_[i] << ":";
      std::string value_as_text;

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

      *out << value_as_text;
    }
    *out << "}";
  }
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////

TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
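
// Illustrative use of TraceResultBuffer with SimpleOutput as the sink:
//   TraceResultBuffer buffer;
//   TraceResultBuffer::SimpleOutput output;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();            // emits "["
//   buffer.AddFragment("{}");  // emits "{}", and "," before later fragments
//   buffer.Finish();           // emits "]"
// leaving output.json_output == "[{}]".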
////////////////////////////////////////////////////////////////////////////////
//
// TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////
class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;

class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};

// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  ~TraceSamplingThread() override;

  // Implementation of PlatformThread::Delegate:
  void ThreadMain() override;

  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  void Stop();
  void WaitSamplingEventForTesting();

 private:
  friend class TraceLog;

  void GetSamples();
  // Not thread-safe. Once the ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);
  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  CancellationFlag cancellation_flag_;
  WaitableEvent waitable_event_for_testing_;
};
TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(false, false) {
}

TraceSamplingThread::~TraceSamplingThread() {
}

void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_.IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    GetSamples();
    waitable_event_for_testing_.Signal();
  }
}

void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}

void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}

void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  // Access to sample_buckets_ doesn't cause races with the sampling thread
  // that uses the sample_buckets_, because it is guaranteed that
  // RegisterSampleBucket is called before the sampling thread is created.
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}

void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
                                                 const char** name) {
  *category = combined;
  *name = &combined[strlen(combined) + 1];
}

void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();
}

void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();
}

TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}
////////////////////////////////////////////////////////////////////////////////
//
// TraceLog
//
////////////////////////////////////////////////////////////////////////////////

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver,
      public MemoryDumpProvider {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TraceTicks& event_timestamp,
                      const ThreadTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(ProcessMemoryDump* pmd) override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int event_count_;
  TimeDelta overhead_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};
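
// Sketch of the per-thread fast path implemented below: each thread that owns
// a message loop gets its own ThreadLocalEventBuffer holding one
// TraceBufferChunk checked out from the shared buffer. The global lock is only
// taken when a chunk is checked out, returned or flushed; filling in
// individual TraceEvents within the chunk needs no locking.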
TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      event_count_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop, so
  // the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  // This is to report the local memory usage when memory-infra is enabled.
  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, ThreadTaskRunnerHandle::Get());

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);
  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

  // Zero event_count_ happens in either of the following cases:
  // - no event generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);
  }

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}
void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TraceTicks& event_timestamp,
    const ThreadTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  CheckThisIsCurrentBuffer();

  event_count_++;
  ThreadTicks thread_now = ThreadNow();
  TraceTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
    if (trace_event) {
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()), event_timestamp,
          event_thread_timestamp, TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead", ::trace_event_internal::kNoId,
          ::trace_event_internal::kNoId, ::trace_event_internal::kZeroNumArgs,
          NULL, NULL, NULL, NULL, 0);
      trace_event->UpdateDuration(now, thread_now);
    }
  }
  overhead_ += overhead;
}
void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}
bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(ProcessMemoryDump* pmd) {
  if (!chunk_)
    return true;
  std::string dump_base_name = StringPrintf(
      "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
  TraceEventMemoryOverhead overhead;
  chunk_->EstimateTraceMemoryOverhead(&overhead);
  overhead.DumpInto(dump_base_name.c_str(), pmd);
  return true;
}
void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}
TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {
}

TraceLogStatus::~TraceLogStatus() {
}

TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}
TraceLog::TraceLog()
    : mode_(DISABLED),
      num_traces_recorded_(0),
      event_callback_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      watch_category_(0),
      trace_options_(kInternalRecordUntilFull),
      sampling_thread_handle_(0),
      trace_config_(TraceConfig()),
      event_callback_trace_config_(TraceConfig()),
      thread_shared_chunk_index_(0),
      generation_(0),
      use_worker_thread_(false) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE);
  }
#endif

  logged_events_.reset(CreateTraceBuffer());

  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this);
}
TraceLog::~TraceLog() {
}
void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or whose message loop may be blocked,
  // the trace events will be added into the main buffer directly.
  if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
    return;
  auto thread_local_event_buffer = thread_local_event_buffer_.Get();
  if (thread_local_event_buffer &&
      !CheckGeneration(thread_local_event_buffer->generation())) {
    delete thread_local_event_buffer;
    thread_local_event_buffer = NULL;
  }
  if (!thread_local_event_buffer) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
    thread_local_event_buffer_.Set(thread_local_event_buffer);
  }
}
bool TraceLog::OnMemoryDump(ProcessMemoryDump* pmd) {
  TraceEventMemoryOverhead overhead;
  overhead.Add("TraceLog", sizeof(*this));
  {
    AutoLock lock(lock_);
    if (logged_events_)
      logged_events_->EstimateTraceMemoryOverhead(&overhead);
  }
  overhead.DumpInto("tracing/main_trace_log", pmd);
  return true;
}
const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}
const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}
void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      trace_config_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
           trace_config_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
#if defined(OS_WIN)
  if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
          category_group)) {
    enabled_flag |= ENABLED_FOR_ETW_EXPORT;
  }
#endif

  g_category_group_enabled[category_index] = enabled_flag;
}
void TraceLog::UpdateCategoryGroupEnabledFlags() {
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = 0; i < category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}
void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
  ResetTraceEventSyntheticDelays();
  const TraceConfig::StringList& delays =
      trace_config_.GetSyntheticDelayValues();
  TraceConfig::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(TimeDelta::FromMicroseconds(
            static_cast<int64>(target_duration * 1e6)));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}
const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  // The g_category_groups is append only, avoid using a lock for the fast path.
  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);

  // Search for pre-existing category group.
  for (size_t i = 0; i < current_category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  unsigned char* category_group_enabled = NULL;
  // This is the slow path: the lock is not held in the case above, so more
  // than one thread could have reached here trying to add the same category.
  // Only hold the lock when actually appending a new category, and
  // check the category groups again.
  AutoLock lock(lock_);
  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
  for (size_t i = 0; i < category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  // Create a new category group.
  DCHECK(category_index < MAX_CATEGORY_GROUPS) <<
      "must increase MAX_CATEGORY_GROUPS";
  if (category_index < MAX_CATEGORY_GROUPS) {
    // Don't hold on to the category_group pointer, so that we can create
    // category groups with strings not known at compile time (this is
    // required by SetWatchEvent).
    const char* new_group = strdup(category_group);
    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
    g_category_groups[category_index] = new_group;
    DCHECK(!g_category_group_enabled[category_index]);
    // Note that if both included and excluded patterns in the
    // TraceConfig are empty, we exclude nothing,
    // thereby enabling this category group.
    UpdateCategoryGroupEnabledFlag(category_index);
    category_group_enabled = &g_category_group_enabled[category_index];
    // Update the max index now.
    base::subtle::Release_Store(&g_category_index, category_index + 1);
  } else {
    category_group_enabled =
        &g_category_group_enabled[g_category_categories_exhausted];
  }
  return category_group_enabled;
}
void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  category_groups->push_back(
      g_category_groups[g_category_trace_event_overhead]);
  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (size_t i = g_num_builtin_categories; i < category_index; i++)
    category_groups->push_back(g_category_groups[i]);
}
void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
  std::vector<EnabledStateObserver*> observer_list;
  {
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_task_runner_);

    InternalTraceOptions new_options =
        GetInternalOptionsFromTraceConfig(trace_config);

    InternalTraceOptions old_options = trace_options();

    if (IsEnabled()) {
      if (new_options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";
      }

      if (mode != mode_) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
      }

      trace_config_.Merge(trace_config);
      UpdateCategoryGroupEnabledFlags();
      return;
    }

    if (dispatching_to_observer_list_) {
      DLOG(ERROR) <<
          "Cannot manipulate TraceLog::Enabled state from an observer.";
      return;
    }

    mode_ = mode;

    if (new_options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, new_options);
      UseNextTraceBuffer();
    }

    num_traces_recorded_++;

    trace_config_ = TraceConfig(trace_config);
    UpdateCategoryGroupEnabledFlags();
    UpdateSyntheticDelaysFromTraceConfig();

    if (new_options & kInternalEnableSampling) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[0],
          "bucket0",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[1],
          "bucket1",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          &g_trace_state[2],
          "bucket2",
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(
              0, sampling_thread_.get(), &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";
      }
    }

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;
  }
  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  {
    AutoLock lock(lock_);
    dispatching_to_observer_list_ = false;
  }
}
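
// Illustrative call site (not part of this file): tracing is typically turned
// on with a config built from a category-filter string, e.g.
//   TraceLog::GetInstance()->SetEnabled(
//       TraceConfig("cc,-ipc", "record-until-full"), TraceLog::RECORDING_MODE);
// While already enabled, a second SetEnabled() only merges the category
// filter; the record mode and options cannot change until SetDisabled().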
void TraceLog::SetArgumentFilterPredicate(
    const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!argument_filter_predicate.is_null());
  DCHECK(argument_filter_predicate_.is_null());
  argument_filter_predicate_ = argument_filter_predicate;
}
TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
    const TraceConfig& config) {
  InternalTraceOptions ret =
      config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
  if (config.IsArgumentFilterEnabled())
    ret |= kInternalEnableArgumentFilter;
  switch (config.GetTraceRecordMode()) {
    case RECORD_UNTIL_FULL:
      return ret | kInternalRecordUntilFull;
    case RECORD_CONTINUOUSLY:
      return ret | kInternalRecordContinuously;
    case ECHO_TO_CONSOLE:
      return ret | kInternalEchoToConsole;
    case RECORD_AS_MUCH_AS_POSSIBLE:
      return ret | kInternalRecordAsMuchAsPossible;
  }
  NOTREACHED();
  return kInternalNone;
}
TraceConfig TraceLog::GetCurrentTraceConfig() const {
  AutoLock lock(lock_);
  return trace_config_;
}
void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();
}
void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (!IsEnabled())
    return;

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  mode_ = DISABLED;

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
  }

  trace_config_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;

  {
    // Dispatch to observers outside the lock in case the observer triggers a
    // trace event.
    AutoUnlock unlock(lock_);
    for (size_t i = 0; i < observer_list.size(); ++i)
      observer_list[i]->OnTraceLogDisabled();
  }
  dispatching_to_observer_list_ = false;
}
int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  if (!IsEnabled())
    return -1;
  return num_traces_recorded_;
}
void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
                listener);
  return it != enabled_state_observer_list_.end();
}
TraceLogStatus TraceLog::GetStatus() const {
  AutoLock lock(lock_);
  TraceLogStatus result;
  result.event_capacity = logged_events_->Capacity();
  result.event_count = logged_events_->Size();
  return result;
}
bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();
}

TraceBuffer* TraceLog::CreateTraceBuffer() {
  InternalTraceOptions options = trace_options();
  if (options & kInternalRecordContinuously)
    return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
    return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
  else if (options & kInternalEchoToConsole)
    return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
  else if (options & kInternalRecordAsMuchAsPossible)
    return CreateTraceBufferVectorOfSize(kTraceEventVectorBigBufferChunks);
  return CreateTraceBufferVectorOfSize(kTraceEventVectorBufferChunks);
}

TraceBuffer* TraceLog::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}
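
// Buffer selection summary for CreateTraceBuffer() above: record-continuously,
// monitoring and echo-to-console all use ring buffers (of different sizes),
// while record-until-full and record-as-much-as-possible use TraceBufferVector,
// which reports IsFull() once it reaches its chunk limit so tracing stops.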
TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle, bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ = logged_events_->GetChunk(
        &thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}
void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked();
  }
}
void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
                                       EventCallback cb) {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_trace_config_ = trace_config;
  UpdateCategoryGroupEnabledFlags();
}

void TraceLog::SetEventCallbackDisabled() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_, 0);
  UpdateCategoryGroupEnabledFlags();
}
// Flush() works as follows:
// 1. Flush() is called in thread A whose task runner is saved in
//    flush_task_runner_;
// 2. If thread_message_loops_ is not empty, thread A posts task to each message
//    loop to flush the thread local buffers; otherwise finish the flush;
// 3. FlushCurrentThread() deletes the thread local event buffer:
//    - The last batch of events of the thread are flushed into the main buffer;
//    - The message loop will be removed from thread_message_loops_;
//      If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  FlushInternal(cb, use_worker_thread, false);
}
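
// Illustrative flush sequence for a caller (OnTraceChunk is hypothetical):
//   TraceLog::GetInstance()->SetDisabled();
//   TraceLog::GetInstance()->Flush(Bind(&OnTraceChunk));
// The OutputCallback may run several times, each with a JSON fragment; the
// final invocation passes has_more_events == false.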
void TraceLog::CancelTracing(const OutputCallback& cb) {
  SetDisabled();
  FlushInternal(cb, false, true);
}
void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
                             bool use_worker_thread,
                             bool discard_events) {
  use_worker_thread_ = use_worker_thread;
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int generation = this->generation();
  // Copy of thread_message_loops_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner> >
      thread_message_loop_task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_task_runner_);
    flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
                             ? ThreadTaskRunnerHandle::Get()
                             : nullptr;
    DCHECK_IMPLIES(thread_message_loops_.size(), flush_task_runner_);
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
        thread_message_loop_task_runners.push_back((*it)->task_runner());
      }
    }
  }

  if (thread_message_loop_task_runners.size()) {
    for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
      thread_message_loop_task_runners[i]->PostTask(
          FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
                          generation, discard_events));
    }
    flush_task_runner_->PostDelayedTask(
        FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
                        discard_events),
        TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(generation, discard_events);
}
// Usually it runs on a different thread.
void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const OutputCallback& flush_output_callback,
    const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate) {
  if (flush_output_callback.is_null())
    return;

  // The callback needs to be called at least once even if there are no events
  // to let the caller know that the flush has completed.
  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
    for (size_t j = 0; j < chunk->size(); ++j) {
      size_t size = json_events_str_ptr->size();
      if (size > kTraceEventBufferSizeInBytes) {
        flush_output_callback.Run(json_events_str_ptr, true);
        json_events_str_ptr = new RefCountedString();
      } else if (size) {
        json_events_str_ptr->data().append(",\n");
      }
      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
                                         argument_filter_predicate);
    }
  }
  flush_output_callback.Run(json_events_str_ptr, false);
}

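// Editorial note (not part of the original file): each chunk handed to
// |flush_output_callback| above holds comma-separated JSON event objects with
// no enclosing brackets, no trailing comma, and no leading comma at the start
// of a new chunk. A consumer that wants a standalone JSON array therefore
// joins chunks with "," and adds the brackets itself, e.g. (names
// hypothetical):
//
//   if (!accumulated_json.empty() && !chunk->data().empty())
//     accumulated_json += ",";
//   accumulated_json += chunk->data();
//   // After the final chunk (has_more_events == false):
//   std::string full_json = "[" + accumulated_json + "]";
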
void TraceLog::FinishFlush(int generation, bool discard_events) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;
  TraceEvent::ArgumentFilterPredicate argument_filter_predicate;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_task_runner_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

    if (trace_options() & kInternalEnableArgumentFilter) {
      CHECK(!argument_filter_predicate_.is_null());
      argument_filter_predicate = argument_filter_predicate_;
    }
  }

  if (discard_events) {
    if (!flush_output_callback.is_null()) {
      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      flush_output_callback.Run(empty_result, false);
    }
    return;
  }

  if (use_worker_thread_ &&
      WorkerPool::PostTask(
          FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
                          Passed(&previous_logged_events),
                          flush_output_callback, argument_filter_predicate),
          true)) {
    return;
  }

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback,
                                  argument_filter_predicate);
}

// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_task_runner_ ||
      thread_message_loops_.size())
    return;

  flush_task_runner_->PostTask(
      FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
                      discard_events));
}

void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // Flush has finished before the timeout.
      return;
    }

    LOG(WARNING) <<
        "The following threads haven't finished flush in time. "
        "If this happens consistently for some thread, please call "
        "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
        "the thread to avoid losing its trace events.";
    for (hash_set<MessageLoop*>::const_iterator it =
             thread_message_loops_.begin();
         it != thread_message_loops_.end(); ++it) {
      LOG(WARNING) << "Thread: " << (*it)->thread_name();
    }
  }
  FinishFlush(generation, discard_events);
}

void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  TraceEvent::ArgumentFilterPredicate argument_filter_predicate;
  {
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();

    if (trace_options() & kInternalEnableArgumentFilter) {
      CHECK(!argument_filter_predicate_.is_null());
      argument_filter_predicate = argument_filter_predicate_;
    }
  }  // release lock

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback,
                                  argument_filter_predicate);
}

void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}

TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TraceTicks now = base::TraceTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, id, ::trace_event_internal::kNoId,
      thread_id, now, num_args, arg_names, arg_types, arg_values,
      convertable_values, flags);
}

TraceEventHandle TraceLog::AddTraceEventWithContextId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    unsigned long long context_id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TraceTicks now = base::TraceTicks::Now();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, id, context_id, thread_id, now,
      num_args, arg_names, arg_types, arg_values, convertable_values,
      flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
}

TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    unsigned long long context_id,
    int thread_id,
    const TraceTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned int flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)
    return handle;

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  DCHECK(!timestamp.is_null());

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id = MangleEventId(id);

  TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
  TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ?
      OffsetNow() : offset_event_timestamp;
  ThreadTicks thread_now = ThreadNow();

  // |thread_local_event_buffer_| can be null if the current thread doesn't
  // have a message loop or the message loop is blocked.
  InitializeThreadLocalEventBufferIfSupported();
  auto thread_local_event_buffer = thread_local_event_buffer_.Get();

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name = ThreadIdNameManager::GetInstance()->
        GetName(thread_id);
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names =
            base::SplitStringPiece(existing_name->second, ",",
                                   base::KEEP_WHITESPACE,
                                   base::SPLIT_WANT_NONEMPTY);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

#if defined(OS_WIN)
  // This is done sooner rather than later, to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already threadsafe.
  if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  num_args, arg_names, arg_types, arg_values,
                                  convertable_values);
#endif  // OS_WIN

  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    if (trace_event) {
      trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
                              phase, category_group_enabled, name, id,
                              context_id, num_args, arg_names, arg_types,
                              arg_values, convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
      &watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();
    }
  }

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(offset_event_timestamp,
                     phase == TRACE_EVENT_PHASE_COMPLETE ?
                         TRACE_EVENT_PHASE_BEGIN : phase,
                     category_group_enabled, name, id,
                     num_args, arg_names, arg_types, arg_values,
                     flags);
    }
  }

  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);

  return handle;
}

// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TraceTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id = trace_event ?
      trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm",
                            thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}

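// Editorial example of the console echo output (format approximate; the
// thread and event names are made up). The message built above is prefixed by
// the thread name, one "| " per BEGIN event still on that thread's stack,
// then the pretty-printed event, with a duration appended only for END
// phases:
//
//   CrBrowserMain: | | MessageLoop::RunTask (2.340 ms)
//
// The \x1b[...m escape sequences merely colorize the line per thread.
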
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  ThreadTicks thread_now = ThreadNow();
  TraceTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
                                              TraceTicks::Now(), trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoId, trace_event_internal::kNoId,
                     NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
    }
  }
}

void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}

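// Editorial usage sketch, not part of the original file (the OnWatchEventHit
// name is hypothetical). The watch-event mechanism lets a test block until a
// specific category/name pair is recorded; the callback is copied and run
// outside |lock_| each time the watched event is added:
//
//   void OnWatchEventHit() {
//     // e.g. signal a base::WaitableEvent the test is waiting on.
//   }
//
//   TraceLog::GetInstance()->SetWatchEvent(
//       "cat", "interesting_event", base::Bind(&OnWatchEventHit));
//   // ... run the code under test, wait for the signal ...
//   TraceLog::GetInstance()->CancelWatchEvent();
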
uint64 TraceLog::MangleEventId(uint64 id) {
  return id ^ process_id_hash_;
}

void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

#if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
  InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                          0,
                          "num_cpus", "number",
                          base::SysInfo::NumberOfProcessors());
#endif

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_sort_index", "sort_index",
                            process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); it++) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_labels", "labels",
                            base::JoinString(labels, ","));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); it++) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); it++) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_name", "name",
                            it->second);
  }

  // If the buffer is full, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "trace_buffer_overflowed",
                            "overflowed_at_ts",
                            buffer_limit_reached_timestamp_);
  }
}

void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer under the control of the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq() ?
        thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}

void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create an FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}

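// Worked example (editorial note, not part of the original file):
// process_id_hash_ is a single FNV-style mixing round of the pid, and
// MangleEventId() above XORs event ids with it when TRACE_EVENT_FLAG_MANGLE_ID
// is set. Because XOR is self-inverse, mangling twice with the same hash
// restores the original id:
//
//   mangled  = id ^ process_id_hash_;
//   restored = mangled ^ process_id_hash_;  // == id
//
// The effect is that the same locally-generated id emitted by two different
// processes maps to two different values in a merged trace.
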
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it = process_labels_.find(
      label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}

void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
}

}  // namespace trace_event
}  // namespace base

namespace trace_event_internal {

ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group, const char* name) {
  // The single atom works because for now the category_group can only be
  // "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name, kNoId,
            kNoId, static_cast<int>(base::PlatformThread::CurrentId()),
            base::TraceTicks::Now(), 0, NULL, NULL, NULL, NULL,
            TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}

}  // namespace trace_event_internal