// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_
#define BASE_DEBUG_TRACE_EVENT_IMPL_H_

#include "base/atomicops.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/scoped_vector.h"
#include "base/observer_list.h"
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/threading/thread_local.h"
#include "base/timer/timer.h"
// Older style trace macros with explicit id and extra data.
// Only these macros result in publishing data to ETW as currently implemented.
#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_BEGIN, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_END_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_END, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_INSTANT, \
        name, reinterpret_cast<const void*>(id), extra)
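//
// Illustrative usage sketch (not part of this header); the event name, id and
// extra strings below are made up:
//
//   TRACE_EVENT_BEGIN_ETW("MyClass::DoWork", this, "started");
//   DoWork();
//   TRACE_EVENT_END_ETW("MyClass::DoWork", this, "finished");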
template <typename Type>
struct DefaultSingletonTraits;
#if defined(COMPILER_GCC)
namespace BASE_HASH_NAMESPACE {
template <>
struct hash<base::MessageLoop*> {
  std::size_t operator()(base::MessageLoop* value) const {
    return reinterpret_cast<std::size_t>(value);
  }
};
}  // BASE_HASH_NAMESPACE
#endif
// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
// class must implement this interface.
class BASE_EXPORT ConvertableToTraceFormat
    : public RefCounted<ConvertableToTraceFormat> {
 public:
  // Append the class info to the provided |out| string. The appended
  // data must be a valid JSON object. Strings must be properly quoted and
  // escaped. There is no processing applied to the content after it is
  // appended.
  virtual void AppendAsTraceFormat(std::string* out) const = 0;

  std::string ToString() const {
    std::string result;
    AppendAsTraceFormat(&result);
    return result;
  }

 protected:
  virtual ~ConvertableToTraceFormat() {}

 private:
  friend class RefCounted<ConvertableToTraceFormat>;
};
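
// Illustrative sketch (not part of this header): a minimal implementation
// that appends one JSON object. The class name and member are hypothetical.
//
//   class MyConvertableData
//       : public base::debug::ConvertableToTraceFormat {
//    public:
//     virtual void AppendAsTraceFormat(std::string* out) const {
//       base::StringAppendF(out, "{\"bytes_used\":%d}", bytes_used_);
//     }
//    private:
//     virtual ~MyConvertableData() {}
//     int bytes_used_;
//   };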
struct TraceEventHandle {

const int kTraceMaxNumArgs = 2;

class BASE_EXPORT TraceEvent {

    unsigned long long as_uint;

    const void* as_pointer;
    const char* as_string;

  // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
  // Use an explicit copy method to avoid accidental misuse of copying.
  void CopyFrom(const TraceEvent& other);
      TimeTicks thread_timestamp,
      const unsigned char* category_group_enabled,
      unsigned long long id,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  void UpdateDuration(const TimeTicks& now, const TimeTicks& thread_now);

  // Serialize event data to JSON.
  void AppendAsJSON(std::string* out) const;
  void AppendPrettyPrinted(std::ostringstream* out) const;

  static void AppendValueAsJSON(unsigned char type,

  TimeTicks timestamp() const { return timestamp_; }
  TimeTicks thread_timestamp() const { return thread_timestamp_; }
  char phase() const { return phase_; }
  int thread_id() const { return thread_id_; }
  TimeDelta duration() const { return duration_; }
  TimeDelta thread_duration() const { return thread_duration_; }
  unsigned long long id() const { return id_; }
  unsigned char flags() const { return flags_; }
  // Exposed for unittesting:

  const base::RefCountedString* parameter_copy_storage() const {
    return parameter_copy_storage_.get();
  }

  const unsigned char* category_group_enabled() const {
    return category_group_enabled_;
  }

  const char* name() const { return name_; }

#if defined(OS_ANDROID)

#endif
  // Note: these are ordered by size (largest first) for optimal packing.
  TimeTicks timestamp_;
  TimeTicks thread_timestamp_;
  TimeDelta thread_duration_;
  // id_ can be used to store phase-specific data.
  unsigned long long id_;
  TraceValue arg_values_[kTraceMaxNumArgs];
  const char* arg_names_[kTraceMaxNumArgs];
  scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
  const unsigned char* category_group_enabled_;
  scoped_refptr<base::RefCountedString> parameter_copy_storage_;
  unsigned char flags_;
  unsigned char arg_types_[kTraceMaxNumArgs];

  DISALLOW_COPY_AND_ASSIGN(TraceEvent);
};
// TraceBufferChunk is the basic unit of TraceBuffer.
class BASE_EXPORT TraceBufferChunk {
 public:
  TraceBufferChunk(uint32 seq)

  void Reset(uint32 new_seq);

  TraceEvent* AddTraceEvent(size_t* event_index);
  bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }

  uint32 seq() const { return seq_; }
  size_t capacity() const { return kTraceBufferChunkSize; }
  size_t size() const { return next_free_; }

  TraceEvent* GetEventAt(size_t index) {
    DCHECK(index < size());
    return &chunk_[index];
  }

  const TraceEvent* GetEventAt(size_t index) const {
    DCHECK(index < size());
    return &chunk_[index];
  }

  scoped_ptr<TraceBufferChunk> Clone() const;

  static const size_t kTraceBufferChunkSize = 64;

  TraceEvent chunk_[kTraceBufferChunkSize];
};
// TraceBuffer holds the events as they are collected.
class BASE_EXPORT TraceBuffer {
 public:
  virtual ~TraceBuffer() {}

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) = 0;

  virtual bool IsFull() const = 0;
  virtual size_t Size() const = 0;
  virtual size_t Capacity() const = 0;
  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;

  // For iteration. Each TraceBuffer can only be iterated once.
  virtual const TraceBufferChunk* NextChunk() = 0;

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
};
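
// Illustrative iteration sketch (not part of this header), assuming
// NextChunk() returns NULL once the buffer has been exhausted:
//
//   for (const TraceBufferChunk* chunk = buffer->NextChunk(); chunk;
//        chunk = buffer->NextChunk()) {
//     for (size_t i = 0; i < chunk->size(); ++i) {
//       const TraceEvent* event = chunk->GetEventAt(i);
//       // ... inspect |event| ...
//     }
//   }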
// TraceResultBuffer collects and converts trace fragments returned by TraceLog
// to JSON output.
class BASE_EXPORT TraceResultBuffer {
 public:
  typedef base::Callback<void(const std::string&)> OutputCallback;

  // If you don't need to stream JSON chunks out efficiently, and just want to
  // get a complete JSON string after calling Finish, use this struct to
  // collect JSON trace output.
  struct BASE_EXPORT SimpleOutput {
    OutputCallback GetCallback();
    void Append(const std::string& json_string);

    // Do what you want with the json_output string after calling
    // TraceResultBuffer::Finish.
    std::string json_output;
  };

  ~TraceResultBuffer();

  // Set the callback. The callback will be called during Start with the
  // initial JSON output and during AddFragment and Finish with following JSON
  // output chunks. The callback target must live past the last calls to
  // TraceResultBuffer::Start/AddFragment/Finish.
  void SetOutputCallback(const OutputCallback& json_chunk_callback);

  // Start JSON output. This resets all internal state, so you can reuse
  // the TraceResultBuffer by calling Start.
  void Start();

  // Call AddFragment 0 or more times to add trace fragments from TraceLog.
  void AddFragment(const std::string& trace_fragment);

  // When all fragments have been added, call Finish to complete the JSON
  // output.
  void Finish();

  OutputCallback output_callback_;
};
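
// Illustrative usage sketch (not part of this header), collecting the whole
// trace into one string via SimpleOutput:
//
//   TraceResultBuffer result;
//   TraceResultBuffer::SimpleOutput output;
//   result.SetOutputCallback(output.GetCallback());
//   result.Start();
//   // Call result.AddFragment(fragment) for each fragment from TraceLog.
//   result.Finish();
//   // output.json_output now contains the complete JSON trace.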
class BASE_EXPORT CategoryFilter {
 public:
  typedef std::vector<std::string> StringList;

  // The default category filter, used when none is provided.
  // Allows all categories through, except if they end in the suffix 'Debug' or
  // 'Test'.
  static const char* kDefaultCategoryFilterString;

  // |filter_string| is a comma-delimited list of category wildcards.
  // A category can have an optional '-' prefix to make it an excluded
  // category. All the same rules apply above, so for example, having both
  // included and excluded categories in the same list would not be supported.
  //
  // Example: CategoryFilter("test_MyTest*");
  // Example: CategoryFilter("test_MyTest*,test_OtherStuff");
  // Example: CategoryFilter("-excluded_category1,-excluded_category2");
  // Example: CategoryFilter("-*,webkit"); would disable everything but webkit.
  // Example: CategoryFilter("-webkit"); would enable everything but webkit.
  //
  // Category filters can also be used to configure synthetic delays.
  //
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16)"); would make swap
  //          buffers always take at least 16 ms.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;oneshot)"); would
  //          make swap buffers take at least 16 ms the first time it is
  //          swapped.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;alternating)");
  //          would make swap buffers take at least 16 ms every other time it
  //          is swapped.
  explicit CategoryFilter(const std::string& filter_string);

  CategoryFilter(const CategoryFilter& cf);

  CategoryFilter& operator=(const CategoryFilter& rhs);

  // Writes the string representation of the CategoryFilter. This is a comma
  // separated string, similar in nature to the one used to determine
  // enabled/disabled category patterns, except here there is an arbitrary
  // order: included categories go first, then excluded categories. Excluded
  // categories are distinguished from included categories by the prefix '-'.
  std::string ToString() const;

  // Determines whether the category group would be enabled or
  // disabled by this category filter.
  bool IsCategoryGroupEnabled(const char* category_group) const;

  // Returns a list of the synthetic delays specified in this category filter.
  const StringList& GetSyntheticDelayValues() const;

  // Merges |nested_filter| with the current CategoryFilter.
  void Merge(const CategoryFilter& nested_filter);

  // Clears both included/excluded pattern lists. This would be equivalent to
  // creating a CategoryFilter with an empty string, through the constructor,
  // i.e. CategoryFilter().
  //
  // When using an empty filter, all categories are considered included as we
  // are not excluding anything.
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, CategoryFilter);

  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
      const std::string& str);

  void Initialize(const std::string& filter_string);
  void WriteString(const StringList& values,
                   bool included) const;
  void WriteString(const StringList& delays, std::string* out) const;
  bool HasIncludedPatterns() const;

  bool DoesCategoryGroupContainCategory(const char* category_group,
                                        const char* category) const;

  StringList included_;
  StringList disabled_;
  StringList excluded_;
};
class TraceSamplingThread;
// Options that determine how the trace buffer stores data.
enum TraceRecordMode {
  // Record until the trace buffer is full.
  RECORD_UNTIL_FULL,

  // Record until the user ends the trace. The trace buffer is a fixed size
  // and we use it as a ring buffer during recording.
  RECORD_CONTINUOUSLY,

  // Echo to console. Events are discarded.
  ECHO_TO_CONSOLE,

  // Record until the trace buffer is full, but with a huge buffer size.
  RECORD_AS_MUCH_AS_POSSIBLE
};
struct BASE_EXPORT TraceOptions {
  TraceOptions()
      : record_mode(RECORD_UNTIL_FULL),
        enable_sampling(false),
        enable_systrace(false) {}

  TraceOptions(TraceRecordMode record_mode)
      : record_mode(record_mode),
        enable_sampling(false),
        enable_systrace(false) {}
  // |options_string| is a comma-delimited list of trace options.
  // Possible options are: "record-until-full", "record-continuously",
  // "trace-to-console", "enable-sampling" and "enable-systrace".
  // The first 3 options are trace recording modes and hence
  // mutually exclusive. If more than one trace recording mode appears in the
  // options_string, the last one takes precedence. If no trace recording mode
  // is specified, the recording mode is RECORD_UNTIL_FULL.
  //
  // The trace options will first be reset to the defaults
  // (record_mode set to RECORD_UNTIL_FULL, enable_sampling and enable_systrace
  // set to false) before options parsed from |options_string| are applied to
  // them.
  //
  // If |options_string| is invalid, the final state of trace_options is
  // undefined.
  //
  // Example: trace_options.SetFromString("record-until-full")
  // Example: trace_options.SetFromString(
  //              "record-continuously, enable-sampling")
  // Example: trace_options.SetFromString("record-until-full, trace-to-console")
  //          will set ECHO_TO_CONSOLE as the recording mode.
  //
  // Returns true on success.
  bool SetFromString(const std::string& options_string);

  std::string ToString() const;

  TraceRecordMode record_mode;
  bool enable_sampling;
  bool enable_systrace;
};
class BASE_EXPORT TraceLog {
 public:
  // The pointer returned from GetCategoryGroupEnabledInternal() points to a
  // value with zero or more of the following bits. Used in this class only.
  // The TRACE_EVENT macros should only use the value as a bool.
  // These values must be in sync with macro values in TraceEvent.h in Blink.
  enum CategoryGroupEnabledFlags {
    // Category group enabled for the recording mode.
    ENABLED_FOR_RECORDING = 1 << 0,
    // Category group enabled for the monitoring mode.
    ENABLED_FOR_MONITORING = 1 << 1,
    // Category group enabled by SetEventCallbackEnabled().
    ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
  };

  static TraceLog* GetInstance();

  // Get set of known category groups. This can change as new code paths are
  // reached. The known category groups are inserted into |category_groups|.
  void GetKnownCategoryGroups(std::vector<std::string>* category_groups);

  // Retrieves a copy (for thread-safety) of the current CategoryFilter.
  CategoryFilter GetCurrentCategoryFilter();

  // Retrieves a copy (for thread-safety) of the current TraceOptions.
  TraceOptions GetCurrentTraceOptions() const;

  // Enables normal tracing (recording trace events in the trace buffer).
  // See CategoryFilter comments for details on how to control what categories
  // will be traced. If tracing has already been enabled, |category_filter| will
  // be merged into the current category filter.
  void SetEnabled(const CategoryFilter& category_filter,
                  Mode mode, const TraceOptions& options);
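
  // Illustrative sketch (not part of this header): enabling and disabling
  // recording. The RECORDING_MODE value and the SetDisabled() call are
  // assumptions; the Mode enum and SetDisabled() declaration are not shown in
  // this excerpt.
  //
  //   TraceLog* log = TraceLog::GetInstance();
  //   log->SetEnabled(CategoryFilter("browser,-excluded_category"),
  //                   TraceLog::RECORDING_MODE, TraceOptions());
  //   ...
  //   log->SetDisabled();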

  // Disables normal tracing for all categories.

  bool IsEnabled() { return mode_ != DISABLED; }

  // The number of times we have begun recording traces. If tracing is off,
  // returns -1. If tracing is on, then it returns the number of times we have
  // recorded a trace. By watching for this number to increment, you can
  // passively discover when a new trace has begun. This is then used to
  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
  int GetNumTracesRecorded();

#if defined(OS_ANDROID)
  void AddClockSyncMetadataEvent();
#endif

  // Enabled state listeners give a callback when tracing is enabled or
  // disabled. This can be used to tie into other libraries' tracing systems
  // on-demand.
  class EnabledStateObserver {
   public:
    // Called just after the tracing system becomes enabled, outside of the
    // |lock_|. TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Called just after the tracing system disables, outside of the |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };
  void AddEnabledStateObserver(EnabledStateObserver* listener);
  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
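
  // Illustrative sketch (not part of this header): a hypothetical observer
  // that logs enabled-state transitions.
  //
  //   class MyObserver : public TraceLog::EnabledStateObserver {
  //    public:
  //     virtual void OnTraceLogEnabled() { VLOG(1) << "tracing started"; }
  //     virtual void OnTraceLogDisabled() { VLOG(1) << "tracing stopped"; }
  //   };
  //   ...
  //   TraceLog::GetInstance()->AddEnabledStateObserver(&my_observer);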

  float GetBufferPercentFull() const;
  bool BufferIsFull() const;

  // Not using base::Callback because it is limited to 7 parameters.
  // Also, using a primitive type allows the callback to be passed directly
  // from WebCore.
  // WARNING: It is possible for the previously set callback to be called
  // after a call to SetEventCallbackEnabled() that replaces it, or after a
  // call to SetEventCallbackDisabled() that disables the callback.
  // This callback may be invoked on any thread.
  // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
  // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
  // interface simple.
  typedef void (*EventCallback)(TimeTicks timestamp,
                                const unsigned char* category_group_enabled,
                                unsigned long long id,
                                const char* const arg_names[],
                                const unsigned char arg_types[],
                                const unsigned long long arg_values[],
                                unsigned char flags);

  // Enable tracing for EventCallback.
  void SetEventCallbackEnabled(const CategoryFilter& category_filter,
                               EventCallback cb);
  void SetEventCallbackDisabled();

  // Flush all collected events to the given output callback. The callback will
  // be called one or more times either synchronously or asynchronously from
  // the current thread with IPC-bite-size chunks. The string format is
  // undefined. Use TraceResultBuffer to convert one or more trace strings to
  // JSON. The callback can be null if the caller doesn't want any data.
  // Due to the implementation of thread-local buffers, flush can't be
  // done when tracing is enabled. If called when tracing is enabled, the
  // callback will be called directly with (empty_string, false) to indicate
  // the end of this unsuccessful flush.
  typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
                              bool has_more_events)> OutputCallback;
  void Flush(const OutputCallback& cb);
  void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
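
  // Illustrative sketch (not part of this header): collecting flushed chunks
  // with a free function bound via base::Bind. |g_result_buffer| is a
  // hypothetical TraceResultBuffer owned elsewhere.
  //
  //   void OnTraceDataCollected(
  //       const scoped_refptr<base::RefCountedString>& chunk,
  //       bool has_more_events) {
  //     g_result_buffer->AddFragment(chunk->data());
  //     if (!has_more_events)
  //       g_result_buffer->Finish();
  //   }
  //   ...
  //   TraceLog::GetInstance()->Flush(base::Bind(&OnTraceDataCollected));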

  // Called by TRACE_EVENT* macros, don't call this directly.
  // The name parameter is a category group, for example:
  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
  static const unsigned char* GetCategoryGroupEnabled(const char* name);
  static const char* GetCategoryGroupName(
      const unsigned char* category_group_enabled);

  // Called by TRACE_EVENT* macros, don't call this directly.
  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
  TraceEventHandle AddTraceEvent(
      const unsigned char* category_group_enabled,
      unsigned long long id,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      const unsigned char* category_group_enabled,
      unsigned long long id,
      const TimeTicks& timestamp,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const char* extra);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const std::string& extra);

  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
                                TraceEventHandle handle);

  // For every matching event, the callback will be called.
  typedef base::Callback<void()> WatchEventCallback;
  void SetWatchEvent(const std::string& category_name,
                     const std::string& event_name,
                     const WatchEventCallback& callback);
  // Cancel the watch event. If tracing is enabled, this may race with the
  // watch event notification firing.
  void CancelWatchEvent();
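
  // Illustrative sketch (not part of this header): waking up when a specific
  // event is recorded. OnWatchEventFired is a hypothetical void() function.
  //
  //   TraceLog::GetInstance()->SetWatchEvent(
  //       "renderer", "WebViewImpl::HandleInputEvent",
  //       base::Bind(&OnWatchEventFired));
  //   ...
  //   TraceLog::GetInstance()->CancelWatchEvent();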

  int process_id() const { return process_id_; }

  // Exposed for unittesting:

  void WaitSamplingEventForTesting();

  // Allows deleting our singleton instance.
  static void DeleteForTesting();

  // Allow tests to inspect TraceEvents.
  size_t GetEventsSize() const { return logged_events_->Size(); }
  TraceEvent* GetEventByHandle(TraceEventHandle handle);

  void SetProcessID(int process_id);

  // Process sort indices, if set, override the order in which a process will
  // appear relative to other processes in the trace viewer. Processes are
  // sorted first on their sort index, ascending, then by their name, and then
  // by tid.
  void SetProcessSortIndex(int sort_index);

  // Sets the name of the process.
  void SetProcessName(const std::string& process_name);

  // Processes can have labels in addition to their names. Use labels, for
  // instance, to list out the web page titles that a process is handling.
  void UpdateProcessLabel(int label_id, const std::string& current_label);
  void RemoveProcessLabel(int label_id);

  // Thread sort indices, if set, override the order in which a thread will
  // appear within its process in the trace viewer. Threads are sorted first
  // on their sort index, ascending, then by their name, and then by tid.
  void SetThreadSortIndex(PlatformThreadId, int sort_index);

  // Allow setting an offset between the current TimeTicks time and the time
  // that should be reported.
  void SetTimeOffset(TimeDelta offset);

  size_t GetObserverCountForTest() const;

  // Call this method if the current thread may block the message loop to
  // prevent the thread from using the thread-local buffer: the thread may not
  // handle the flush request in time, causing loss of unflushed events.
  void SetCurrentThreadBlocksMessageLoop();

  typedef unsigned int InternalTraceOptions;
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferGetReturnChunk);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferHalfIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferFullIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferVectorReportFull);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           ConvertTraceOptionsToInternalOptions);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceRecordAsMuchAsPossibleMode);

  // This allows constructor and destructor to be private and usable only
  // by the Singleton class.
  friend struct DefaultSingletonTraits<TraceLog>;

  // Enable/disable each category group based on the current mode_,
  // category_filter_, event_callback_ and event_callback_category_filter_.
  // Enable the category group in the enabled mode if category_filter_ matches
  // the category group, or if event_callback_ is not null and
  // event_callback_category_filter_ matches the category group.
  void UpdateCategoryGroupEnabledFlags();
  void UpdateCategoryGroupEnabledFlag(size_t category_index);

  // Configure synthetic delays based on the values set in the current
  // category filter.
  void UpdateSyntheticDelaysFromCategoryFilter();

  InternalTraceOptions GetInternalOptionsFromTraceOptions(
      const TraceOptions& options);

  class ThreadLocalEventBuffer;
  class OptionalAutoLock;

  const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
  void AddMetadataEventsWhileLocked();

  InternalTraceOptions trace_options() const {
    return static_cast<InternalTraceOptions>(
        subtle::NoBarrier_Load(&trace_options_));
  }

  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
  TraceBuffer* CreateTraceBuffer();
  TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);

  std::string EventToConsoleMessage(unsigned char phase,
                                    const TimeTicks& timestamp,
                                    TraceEvent* trace_event);

  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
                                                     bool check_buffer_is_full);
  void CheckIfBufferIsFullWhileLocked();
  void SetDisabledWhileLocked();

  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
                                       OptionalAutoLock* lock);

  // |generation| is used in the following callbacks to check if the callback
  // is called for the flush of the current |logged_events_|.
  void FlushCurrentThread(int generation);
  void ConvertTraceEventsToTraceFormat(
      scoped_ptr<TraceBuffer> logged_events,
      const TraceLog::OutputCallback& flush_output_callback);
  void FinishFlush(int generation);
  void OnFlushTimeout(int generation);

  int generation() const {
    return static_cast<int>(subtle::NoBarrier_Load(&generation_));
  }
  bool CheckGeneration(int generation) const {
    return generation == this->generation();
  }
  void UseNextTraceBuffer();

  TimeTicks OffsetNow() const {
    return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime());
  }
  TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
    return timestamp - time_offset_;
  }

  // Internal representation of trace options since we store the currently used
  // trace option as an AtomicWord.
  static const InternalTraceOptions kInternalNone;
  static const InternalTraceOptions kInternalRecordUntilFull;
  static const InternalTraceOptions kInternalRecordContinuously;
  static const InternalTraceOptions kInternalEchoToConsole;
  static const InternalTraceOptions kInternalEnableSampling;
  static const InternalTraceOptions kInternalRecordAsMuchAsPossible;

  // This lock protects TraceLog member accesses (except for members protected
  // by thread_info_lock_) from arbitrary threads.
  Lock lock_;
  // This lock protects accesses to thread_names_, thread_event_start_times_
  // and thread_colors_.
  Lock thread_info_lock_;
  int num_traces_recorded_;
  scoped_ptr<TraceBuffer> logged_events_;
  subtle::AtomicWord /* EventCallback */ event_callback_;
  bool dispatching_to_observer_list_;
  std::vector<EnabledStateObserver*> enabled_state_observer_list_;

  std::string process_name_;
  base::hash_map<int, std::string> process_labels_;
  int process_sort_index_;
  base::hash_map<int, int> thread_sort_indices_;
  base::hash_map<int, std::string> thread_names_;

  // The following two maps are used only when ECHO_TO_CONSOLE.
  base::hash_map<int, std::stack<TimeTicks> > thread_event_start_times_;
  base::hash_map<std::string, int> thread_colors_;

  TimeTicks buffer_limit_reached_timestamp_;

  // XORed with TraceID to make it unlikely to collide with other processes.
  unsigned long long process_id_hash_;

  TimeDelta time_offset_;

  // Allow tests to wake up when certain events occur.
  WatchEventCallback watch_event_callback_;
  subtle::AtomicWord /* const unsigned char* */ watch_category_;
  std::string watch_event_name_;

  subtle::AtomicWord /* Options */ trace_options_;

  // Sampling thread handles.
  scoped_ptr<TraceSamplingThread> sampling_thread_;
  PlatformThreadHandle sampling_thread_handle_;

  CategoryFilter category_filter_;
  CategoryFilter event_callback_category_filter_;

  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
  ThreadLocalBoolean thread_blocks_message_loop_;
  ThreadLocalBoolean thread_is_in_trace_event_;

  // Contains the message loops of threads that have had at least one event
  // added into the local event buffer. Not using MessageLoopProxy because we
  // need to know the lifetime of the message loops.
  hash_set<MessageLoop*> thread_message_loops_;

  // For events which can't be added into the thread-local buffer, e.g. events
  // from threads without a message loop.
  scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
  size_t thread_shared_chunk_index_;

  // Set when an asynchronous Flush is in progress.
  OutputCallback flush_output_callback_;
  scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_;
  subtle::AtomicWord generation_;

  DISALLOW_COPY_AND_ASSIGN(TraceLog);
};

#endif  // BASE_DEBUG_TRACE_EVENT_IMPL_H_