// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
#define BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_

#include <stack>
#include <string>
#include <vector>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/scoped_vector.h"
#include "base/observer_list.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/threading/thread_local.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_event_memory_overhead.h"

// Older style trace macros with explicit id and extra data
// Only these macros result in publishing data to ETW as currently implemented.
// TODO(georgesak): Update/replace these with new ETW macros.
#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
    base::trace_event::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_BEGIN, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_END_ETW(name, id, extra) \
    base::trace_event::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_END, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
    base::trace_event::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_INSTANT, \
        name, reinterpret_cast<const void*>(id), extra)
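
// Illustrative usage sketch (not part of this header; the name, id and extra
// values below are hypothetical). |id| may be any pointer-sized value used to
// correlate the BEGIN/END pair, and |extra| is a free-form string payload:
//
//   TRACE_EVENT_BEGIN_ETW("MyRenderer::Paint", this, "frame=1");
//   // ... do the work being traced ...
//   TRACE_EVENT_END_ETW("MyRenderer::Paint", this, "frame=1");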

template <typename Type>
struct DefaultSingletonTraits;

namespace base {
namespace trace_event {

// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
// class must implement this interface.
class BASE_EXPORT ConvertableToTraceFormat
    : public RefCounted<ConvertableToTraceFormat> {
 public:
  // Append the class info to the provided |out| string. The appended
  // data must be a valid JSON object. Strings must be properly quoted and
  // escaped. There is no processing applied to the content after it is
  // appended.
  virtual void AppendAsTraceFormat(std::string* out) const = 0;

  virtual void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);

  std::string ToString() const {
    std::string result;
    AppendAsTraceFormat(&result);
    return result;
  }

 protected:
  virtual ~ConvertableToTraceFormat() {}

 private:
  friend class RefCounted<ConvertableToTraceFormat>;
};
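
// Illustrative sketch only: |MyCounterState| is a hypothetical subclass (not
// part of this header) showing how a convertable argument serializes itself.
// It assumes base/strings/string_number_conversions.h for base::IntToString.
//
//   class MyCounterState : public ConvertableToTraceFormat {
//    public:
//     explicit MyCounterState(int value) : value_(value) {}
//
//     void AppendAsTraceFormat(std::string* out) const override {
//       // The appended text must be a self-contained, valid JSON object.
//       out->append("{\"value\":");
//       out->append(base::IntToString(value_));
//       out->append("}");
//     }
//
//    private:
//     ~MyCounterState() override {}
//     int value_;
//   };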

struct TraceEventHandle {
  uint32 chunk_seq;
  uint16 chunk_index;
  uint16 event_index;
};

const int kTraceMaxNumArgs = 2;

class BASE_EXPORT TraceEvent {
 public:
  union TraceValue {
    bool as_bool;
    unsigned long long as_uint;
    long long as_int;
    double as_double;
    const void* as_pointer;
    const char* as_string;
  };

  // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
  // Use explicit copy method to avoid accidental misuse of copying.
  void CopyFrom(const TraceEvent& other);

  void Initialize(int thread_id,
                  TraceTicks timestamp,
                  ThreadTicks thread_timestamp,
                  char phase,
                  const unsigned char* category_group_enabled,
                  const char* name,
                  unsigned long long id,
                  int num_args,
                  const char** arg_names,
                  const unsigned char* arg_types,
                  const unsigned long long* arg_values,
                  const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
                  unsigned char flags);

  void UpdateDuration(const TraceTicks& now, const ThreadTicks& thread_now);

  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead*);

  // Serialize event data to JSON
  typedef base::Callback<bool(const char* category_group_name,
                              const char* event_name)> ArgumentFilterPredicate;
  void AppendAsJSON(
      std::string* out,
      const ArgumentFilterPredicate& argument_filter_predicate) const;
  void AppendPrettyPrinted(std::ostringstream* out) const;

  static void AppendValueAsJSON(unsigned char type,
                                TraceValue value,
                                std::string* out);

  TraceTicks timestamp() const { return timestamp_; }
  ThreadTicks thread_timestamp() const { return thread_timestamp_; }
  char phase() const { return phase_; }
  int thread_id() const { return thread_id_; }
  TimeDelta duration() const { return duration_; }
  TimeDelta thread_duration() const { return thread_duration_; }
  unsigned long long id() const { return id_; }
  unsigned char flags() const { return flags_; }

  // Exposed for unittesting:

  const base::RefCountedString* parameter_copy_storage() const {
    return parameter_copy_storage_.get();
  }

  const unsigned char* category_group_enabled() const {
    return category_group_enabled_;
  }

  const char* name() const { return name_; }

#if defined(OS_ANDROID)
  void SendToATrace();
#endif

 private:
  // Note: these are ordered by size (largest first) for optimal packing.
  TraceTicks timestamp_;
  ThreadTicks thread_timestamp_;
  TimeDelta duration_;
  TimeDelta thread_duration_;
  // id_ can be used to store phase-specific data.
  unsigned long long id_;
  scoped_ptr<TraceEventMemoryOverhead> cached_memory_overhead_estimate_;
  TraceValue arg_values_[kTraceMaxNumArgs];
  const char* arg_names_[kTraceMaxNumArgs];
  scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
  const unsigned char* category_group_enabled_;
  const char* name_;
  scoped_refptr<base::RefCountedString> parameter_copy_storage_;
  int thread_id_;
  char phase_;
  unsigned char flags_;
  unsigned char arg_types_[kTraceMaxNumArgs];

  DISALLOW_COPY_AND_ASSIGN(TraceEvent);
};

// TraceBufferChunk is the basic unit of TraceBuffer.
class BASE_EXPORT TraceBufferChunk {
 public:
  explicit TraceBufferChunk(uint32 seq);

  void Reset(uint32 new_seq);
  TraceEvent* AddTraceEvent(size_t* event_index);
  bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }

  uint32 seq() const { return seq_; }
  size_t capacity() const { return kTraceBufferChunkSize; }
  size_t size() const { return next_free_; }

  TraceEvent* GetEventAt(size_t index) {
    DCHECK(index < size());
    return &chunk_[index];
  }
  const TraceEvent* GetEventAt(size_t index) const {
    DCHECK(index < size());
    return &chunk_[index];
  }

  scoped_ptr<TraceBufferChunk> Clone() const;

  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);

  static const size_t kTraceBufferChunkSize = 64;

 private:
  size_t next_free_;
  scoped_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_when_full_;
  TraceEvent chunk_[kTraceBufferChunkSize];
  uint32 seq_;
};

// TraceBuffer holds the events as they are collected.
class BASE_EXPORT TraceBuffer {
 public:
  virtual ~TraceBuffer() {}

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) = 0;

  virtual bool IsFull() const = 0;
  virtual size_t Size() const = 0;
  virtual size_t Capacity() const = 0;
  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;

  // For iteration. Each TraceBuffer can only be iterated once.
  virtual const TraceBufferChunk* NextChunk() = 0;

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;

  // Computes an estimate of the size of the buffer, including all the retained
  // objects.
  virtual void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) = 0;
};
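
// Illustrative iteration sketch (|buffer| is a hypothetical TraceBuffer
// instance). Each TraceBuffer may only be iterated once, so iterate a clone
// when the original must stay usable:
//
//   scoped_ptr<TraceBuffer> snapshot = buffer->CloneForIteration();
//   while (const TraceBufferChunk* chunk = snapshot->NextChunk()) {
//     for (size_t i = 0; i < chunk->size(); ++i) {
//       const TraceEvent* event = chunk->GetEventAt(i);
//       // Inspect |event| here, e.g. event->name().
//     }
//   }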

// TraceResultBuffer collects and converts trace fragments returned by TraceLog
// to JSON output.
class BASE_EXPORT TraceResultBuffer {
 public:
  typedef base::Callback<void(const std::string&)> OutputCallback;

  // If you don't need to stream JSON chunks out efficiently, and just want to
  // get a complete JSON string after calling Finish, use this struct to
  // collect JSON trace output.
  struct BASE_EXPORT SimpleOutput {
    OutputCallback GetCallback();
    void Append(const std::string& json_string);

    // Do what you want with the |json_output| string after calling
    // TraceResultBuffer::Finish.
    std::string json_output;
  };

  TraceResultBuffer();
  ~TraceResultBuffer();

  // Set callback. The callback will be called during Start with the initial
  // JSON output and during AddFragment and Finish with subsequent JSON output
  // chunks. The callback target must live past the last calls to
  // TraceResultBuffer::Start/AddFragment/Finish.
  void SetOutputCallback(const OutputCallback& json_chunk_callback);

  // Start JSON output. This resets all internal state, so you can reuse
  // the TraceResultBuffer by calling Start.
  void Start();

  // Call AddFragment 0 or more times to add trace fragments from TraceLog.
  void AddFragment(const std::string& trace_fragment);

  // When all fragments have been added, call Finish to complete the JSON
  // formatted output.
  void Finish();

 private:
  OutputCallback output_callback_;
};
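
// Illustrative sketch of collecting a complete JSON string with SimpleOutput
// (the |fragment1|/|fragment2| strings are hypothetical TraceLog output):
//
//   TraceResultBuffer::SimpleOutput output;
//   TraceResultBuffer result_buffer;
//   result_buffer.SetOutputCallback(output.GetCallback());
//   result_buffer.Start();
//   result_buffer.AddFragment(fragment1);
//   result_buffer.AddFragment(fragment2);
//   result_buffer.Finish();
//   // output.json_output now holds the complete JSON trace.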

class TraceSamplingThread;

struct BASE_EXPORT TraceLogStatus {
  size_t event_capacity;
  size_t event_count;
};

class BASE_EXPORT TraceLog : public MemoryDumpProvider {
 public:
  enum Mode {
    DISABLED = 0,
    RECORDING_MODE,
    MONITORING_MODE,
  };

  // The pointer returned from GetCategoryGroupEnabledInternal() points to a
  // value with zero or more of the following bits. Used in this class only.
  // The TRACE_EVENT macros should only use the value as a bool.
  // These values must be in sync with macro values in TraceEvent.h in Blink.
  enum CategoryGroupEnabledFlags {
    // Category group enabled for the recording mode.
    ENABLED_FOR_RECORDING = 1 << 0,
    // Category group enabled for the monitoring mode.
    ENABLED_FOR_MONITORING = 1 << 1,
    // Category group enabled by SetEventCallbackEnabled().
    ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
    // Category group enabled to export events to ETW.
    ENABLED_FOR_ETW_EXPORT = 1 << 3
  };

  static TraceLog* GetInstance();

  // Get set of known category groups. This can change as new code paths are
  // reached. The known category groups are inserted into |category_groups|.
  void GetKnownCategoryGroups(std::vector<std::string>* category_groups);

  // Retrieves a copy (for thread-safety) of the current TraceConfig.
  TraceConfig GetCurrentTraceConfig() const;

  // Enables normal tracing (recording trace events in the trace buffer).
  // See TraceConfig comments for details on how to control what categories
  // will be traced. If tracing has already been enabled, |trace_config| will
  // be merged into the current category filter.
  void SetEnabled(const TraceConfig& trace_config, Mode mode);

  // Disables normal tracing for all categories.
  void SetDisabled();

  bool IsEnabled() { return mode_ != DISABLED; }

  // The number of times we have begun recording traces. If tracing is off,
  // returns -1. If tracing is on, then it returns the number of times we have
  // recorded a trace. By watching for this number to increment, you can
  // passively discover when a new trace has begun. This is then used to
  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
  int GetNumTracesRecorded();
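
  // Illustrative sketch of turning recording on and off (the category list is
  // hypothetical; RECORDING_MODE is the normal recording Mode):
  //
  //   TraceLog* log = TraceLog::GetInstance();
  //   log->SetEnabled(TraceConfig("renderer,cc", ""),
  //                   TraceLog::RECORDING_MODE);
  //   DCHECK(log->IsEnabled());
  //   // ... run the code being traced ...
  //   log->SetDisabled();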

#if defined(OS_ANDROID)
  void AddClockSyncMetadataEvent();
#endif

  // Enabled state listeners give a callback when tracing is enabled or
  // disabled. This can be used to tie into other libraries' tracing systems
  // on-demand.
  class BASE_EXPORT EnabledStateObserver {
   public:
    // Called just after the tracing system becomes enabled, outside of the
    // |lock_|. TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Called just after the tracing system disables, outside of the |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };
  void AddEnabledStateObserver(EnabledStateObserver* listener);
  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
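
  // Illustrative sketch (|MyTracingObserver| is hypothetical): an observer
  // that reacts when tracing starts or stops.
  //
  //   class MyTracingObserver : public TraceLog::EnabledStateObserver {
  //    public:
  //     void OnTraceLogEnabled() override { /* start auxiliary logging */ }
  //     void OnTraceLogDisabled() override { /* stop auxiliary logging */ }
  //   };
  //
  //   MyTracingObserver observer;
  //   TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
  //   // ... observer must outlive its registration ...
  //   TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);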

  TraceLogStatus GetStatus() const;
  bool BufferIsFull() const;

  // Computes an estimate of the size of the TraceLog including all the
  // retained objects.
  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);

  // Not using base::Callback because it is limited to 7 parameters.
  // Also, using a primitive type allows directly passing the callback from
  // WebCore.
  // WARNING: It is possible for the previously set callback to be called
  // after a call to SetEventCallbackEnabled() that replaces it, or after a
  // call to SetEventCallbackDisabled() that disables the callback.
  // This callback may be invoked on any thread.
  // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
  // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
  // interface simple.
  typedef void (*EventCallback)(TraceTicks timestamp,
                                char phase,
                                const unsigned char* category_group_enabled,
                                const char* name,
                                unsigned long long id,
                                int num_args,
                                const char* const arg_names[],
                                const unsigned char arg_types[],
                                const unsigned long long arg_values[],
                                unsigned char flags);

  // Enable tracing for EventCallback.
  void SetEventCallbackEnabled(const TraceConfig& trace_config,
                               EventCallback cb);
  void SetEventCallbackDisabled();
  void SetArgumentFilterPredicate(
      const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate);
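
  // Illustrative sketch (|OnTraceEvent| is a hypothetical free function with
  // the EventCallback signature above):
  //
  //   void OnTraceEvent(TraceTicks timestamp,
  //                     char phase,
  //                     const unsigned char* category_group_enabled,
  //                     const char* name,
  //                     unsigned long long id,
  //                     int num_args,
  //                     const char* const arg_names[],
  //                     const unsigned char arg_types[],
  //                     const unsigned long long arg_values[],
  //                     unsigned char flags) {
  //     // May run on any thread; keep this cheap and thread-safe.
  //   }
  //
  //   TraceLog::GetInstance()->SetEventCallbackEnabled(
  //       TraceConfig("renderer", ""), &OnTraceEvent);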

  // Flush all collected events to the given output callback. The callback will
  // be called one or more times either synchronously or asynchronously from
  // the current thread with IPC-bite-size chunks. The string format is
  // undefined. Use TraceResultBuffer to convert one or more trace strings to
  // JSON. The callback can be null if the caller doesn't want any data.
  // Due to the implementation of thread-local buffers, flush can't be
  // done when tracing is enabled. If called when tracing is enabled, the
  // callback will be called directly with (empty_string, false) to indicate
  // the end of this unsuccessful flush. Flush does the serialization
  // on the same thread if the caller doesn't set use_worker_thread explicitly.
  typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
                              bool has_more_events)> OutputCallback;
  void Flush(const OutputCallback& cb, bool use_worker_thread = false);
  void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
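
  // Illustrative flush sketch (|OnTraceDataCollected| and |result_buffer| are
  // hypothetical; the fragments are fed into a TraceResultBuffer as shown
  // above):
  //
  //   void OnTraceDataCollected(
  //       const scoped_refptr<base::RefCountedString>& chunk,
  //       bool has_more_events) {
  //     result_buffer->AddFragment(chunk->data());
  //     if (!has_more_events)
  //       result_buffer->Finish();
  //   }
  //
  //   TraceLog::GetInstance()->SetDisabled();
  //   TraceLog::GetInstance()->Flush(base::Bind(&OnTraceDataCollected));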

  // Called by TRACE_EVENT* macros, don't call this directly.
  // The name parameter is a category group for example:
  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
  static const unsigned char* GetCategoryGroupEnabled(const char* name);
  static const char* GetCategoryGroupName(
      const unsigned char* category_group_enabled);

  // Called by TRACE_EVENT* macros, don't call this directly.
  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
  TraceEventHandle AddTraceEvent(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int thread_id,
      const TraceTicks& timestamp,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const char* extra);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const std::string& extra);

  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceEventHandle handle);

  // For every matching event, the callback will be called.
  typedef base::Callback<void()> WatchEventCallback;
  void SetWatchEvent(const std::string& category_name,
                     const std::string& event_name,
                     const WatchEventCallback& callback);
  // Cancel the watch event. If tracing is enabled, this may race with the
  // watch event notification firing.
  void CancelWatchEvent();
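
  // Illustrative sketch (|OnWatchEventHit| is a hypothetical void() function):
  //
  //   TraceLog::GetInstance()->SetWatchEvent(
  //       "renderer", "WebViewImpl::HandleInputEvent",
  //       base::Bind(&OnWatchEventHit));
  //   // ... later, when no longer interested ...
  //   TraceLog::GetInstance()->CancelWatchEvent();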

  int process_id() const { return process_id_; }

  uint64 MangleEventId(uint64 id);

  // Exposed for unittesting:

  void WaitSamplingEventForTesting();

  // Allows deleting our singleton instance.
  static void DeleteForTesting();

  // Allow tests to inspect TraceEvents.
  TraceEvent* GetEventByHandle(TraceEventHandle handle);

  void SetProcessID(int process_id);

  // Process sort indices, if set, override the order in which a process will
  // appear relative to other processes in the trace viewer. Processes are
  // sorted first on their sort index, ascending, then by their name, and then
  // by their tid.
  void SetProcessSortIndex(int sort_index);

  // Sets the name of the process.
  void SetProcessName(const std::string& process_name);

  // Processes can have labels in addition to their names. Use labels, for
  // instance, to list out the web page titles that a process is handling.
  void UpdateProcessLabel(int label_id, const std::string& current_label);
  void RemoveProcessLabel(int label_id);
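
  // Illustrative sketch of the process metadata calls (all values are
  // hypothetical; they only affect how the trace viewer displays the process):
  //
  //   TraceLog* log = TraceLog::GetInstance();
  //   log->SetProcessID(static_cast<int>(base::GetCurrentProcId()));
  //   log->SetProcessName("Renderer");
  //   log->SetProcessSortIndex(2);
  //   log->UpdateProcessLabel(1, "Example Page Title");
  //   // ... when the page goes away ...
  //   log->RemoveProcessLabel(1);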

  // Thread sort indices, if set, override the order in which a thread will
  // appear within its process in the trace viewer. Threads are sorted first on
  // their sort index, ascending, then by their name, and then by their tid.
  void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);

  // Allow setting an offset between the current TraceTicks time and the time
  // that should be reported.
  void SetTimeOffset(TimeDelta offset);

  size_t GetObserverCountForTest() const;

  // Call this method if the current thread may block the message loop to
  // prevent the thread from using the thread-local buffer because the thread
  // may not handle the flush request in time, causing loss of unflushed
  // events.
  void SetCurrentThreadBlocksMessageLoop();

 private:
  typedef unsigned int InternalTraceOptions;

  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferGetReturnChunk);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferHalfIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferFullIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferVectorReportFull);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           ConvertTraceConfigToInternalOptions);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceRecordAsMuchAsPossibleMode);

  // This allows constructor and destructor to be private and usable only
  // by the Singleton class.
  friend struct DefaultSingletonTraits<TraceLog>;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(ProcessMemoryDump* pmd) override;

  // Enable/disable each category group based on the current mode_,
  // category_filter_, event_callback_ and event_callback_category_filter_.
  // Enable the category group in the enabled mode if category_filter_ matches
  // the category group, or event_callback_ is not null and
  // event_callback_category_filter_ matches the category group.
  void UpdateCategoryGroupEnabledFlags();
  void UpdateCategoryGroupEnabledFlag(size_t category_index);

  // Configure synthetic delays based on the values set in the current
  // trace config.
  void UpdateSyntheticDelaysFromTraceConfig();

  InternalTraceOptions GetInternalOptionsFromTraceConfig(
      const TraceConfig& config);

  class ThreadLocalEventBuffer;
  class OptionalAutoLock;

  TraceLog();
  ~TraceLog() override;
  const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
  void AddMetadataEventsWhileLocked();

  InternalTraceOptions trace_options() const {
    return static_cast<InternalTraceOptions>(
        subtle::NoBarrier_Load(&trace_options_));
  }

  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
  TraceBuffer* CreateTraceBuffer();
  TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);

  std::string EventToConsoleMessage(unsigned char phase,
                                    const TraceTicks& timestamp,
                                    TraceEvent* trace_event);

  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
                                                     bool check_buffer_is_full);
  void CheckIfBufferIsFullWhileLocked();
  void SetDisabledWhileLocked();

  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
                                       OptionalAutoLock* lock);

  // |generation| is used in the following callbacks to check if the callback
  // is called for the flush of the current |logged_events_|.
  void FlushCurrentThread(int generation);
  // Usually it runs on a different thread.
  static void ConvertTraceEventsToTraceFormat(
      scoped_ptr<TraceBuffer> logged_events,
      const TraceLog::OutputCallback& flush_output_callback,
      const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate);
  void FinishFlush(int generation);
  void OnFlushTimeout(int generation);

  int generation() const {
    return static_cast<int>(subtle::NoBarrier_Load(&generation_));
  }
  bool CheckGeneration(int generation) const {
    return generation == this->generation();
  }
  void UseNextTraceBuffer();

  TraceTicks OffsetNow() const {
    return OffsetTimestamp(TraceTicks::Now());
  }
  TraceTicks OffsetTimestamp(const TraceTicks& timestamp) const {
    return timestamp - time_offset_;
  }

  // Internal representation of trace options since we store the currently used
  // trace option as an AtomicWord.
  static const InternalTraceOptions kInternalNone;
  static const InternalTraceOptions kInternalRecordUntilFull;
  static const InternalTraceOptions kInternalRecordContinuously;
  static const InternalTraceOptions kInternalEchoToConsole;
  static const InternalTraceOptions kInternalEnableSampling;
  static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
  static const InternalTraceOptions kInternalEnableArgumentFilter;

  // This lock protects TraceLog member accesses (except for members protected
  // by thread_info_lock_) from arbitrary threads.
  mutable Lock lock_;
  // This lock protects accesses to thread_names_, thread_event_start_times_
  // and thread_colors_.
  Lock thread_info_lock_;
  Mode mode_;
  int num_traces_recorded_;

  scoped_ptr<TraceBuffer> logged_events_;
  subtle::AtomicWord /* EventCallback */ event_callback_;
  bool dispatching_to_observer_list_;
  std::vector<EnabledStateObserver*> enabled_state_observer_list_;

  std::string process_name_;
  base::hash_map<int, std::string> process_labels_;
  int process_sort_index_;
  base::hash_map<int, int> thread_sort_indices_;
  base::hash_map<int, std::string> thread_names_;

  // The following two maps are used only when ECHO_TO_CONSOLE.
  base::hash_map<int, std::stack<TraceTicks> > thread_event_start_times_;
  base::hash_map<std::string, int> thread_colors_;

  TraceTicks buffer_limit_reached_timestamp_;

  // XORed with TraceID to make it unlikely to collide with other processes.
  unsigned long long process_id_hash_;

  int process_id_;

  TimeDelta time_offset_;

  // Allow tests to wake up when certain events occur.
  WatchEventCallback watch_event_callback_;
  subtle::AtomicWord /* const unsigned char* */ watch_category_;
  std::string watch_event_name_;

  subtle::AtomicWord /* Options */ trace_options_;

  // Sampling thread handles.
  scoped_ptr<TraceSamplingThread> sampling_thread_;
  PlatformThreadHandle sampling_thread_handle_;

  TraceConfig trace_config_;
  TraceConfig event_callback_trace_config_;

  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
  ThreadLocalBoolean thread_blocks_message_loop_;
  ThreadLocalBoolean thread_is_in_trace_event_;

  // Contains the message loops of threads that have had at least one event
  // added into the local event buffer. Not using SingleThreadTaskRunner
  // because we need to know the lifetime of the message loops.
  hash_set<MessageLoop*> thread_message_loops_;

  // For events which can't be added into the thread local buffer, e.g. events
  // from threads without a message loop.
  scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
  size_t thread_shared_chunk_index_;

  // Set when asynchronous Flush is in progress.
  OutputCallback flush_output_callback_;
  scoped_refptr<SingleThreadTaskRunner> flush_task_runner_;
  TraceEvent::ArgumentFilterPredicate argument_filter_predicate_;
  subtle::AtomicWord generation_;
  bool use_worker_thread_;

  DISALLOW_COPY_AND_ASSIGN(TraceLog);
};

}  // namespace trace_event
}  // namespace base

#endif  // BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_