1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
6 #ifndef BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
7 #define BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
13 #include "base/atomicops.h"
14 #include "base/base_export.h"
15 #include "base/callback.h"
16 #include "base/containers/hash_tables.h"
17 #include "base/gtest_prod_util.h"
18 #include "base/memory/ref_counted_memory.h"
19 #include "base/memory/scoped_vector.h"
20 #include "base/observer_list.h"
21 #include "base/strings/string_util.h"
22 #include "base/synchronization/condition_variable.h"
23 #include "base/synchronization/lock.h"
24 #include "base/threading/thread.h"
25 #include "base/threading/thread_local.h"
// Older style trace macros with explicit id and extra data
// Only these macros result in publishing data to ETW as currently implemented.
// TODO(georgesak): Update/replace these with new ETW macros.
#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
    base::trace_event::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_BEGIN, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_END_ETW(name, id, extra) \
    base::trace_event::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_END, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
    base::trace_event::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_INSTANT, \
        name, reinterpret_cast<const void*>(id), extra)
// Forward declaration of the singleton traits template; TraceLog below
// befriends DefaultSingletonTraits<TraceLog> so the Singleton machinery can
// construct it despite its private constructor.
template <typename Type>
struct DefaultSingletonTraits;
53 namespace trace_event
{
55 // For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
56 // class must implement this interface.
57 class BASE_EXPORT ConvertableToTraceFormat
58 : public RefCounted
<ConvertableToTraceFormat
> {
60 // Append the class info to the provided |out| string. The appended
61 // data must be a valid JSON object. Strings must be properly quoted, and
62 // escaped. There is no processing applied to the content after it is
64 virtual void AppendAsTraceFormat(std::string
* out
) const = 0;
66 std::string
ToString() const {
68 AppendAsTraceFormat(&result
);
73 virtual ~ConvertableToTraceFormat() {}
76 friend class RefCounted
<ConvertableToTraceFormat
>;
79 struct TraceEventHandle
{
// Maximum number of arguments a single trace event can carry; the arg_names_,
// arg_types_, arg_values_ and convertable_values_ arrays in TraceEvent are all
// sized by this constant.
const int kTraceMaxNumArgs = 2;
87 class BASE_EXPORT TraceEvent
{
91 unsigned long long as_uint
;
94 const void* as_pointer
;
95 const char* as_string
;
101 // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
102 // Use explicit copy method to avoid accidentally misuse of copy.
103 void CopyFrom(const TraceEvent
& other
);
108 TimeTicks thread_timestamp
,
110 const unsigned char* category_group_enabled
,
112 unsigned long long id
,
114 const char** arg_names
,
115 const unsigned char* arg_types
,
116 const unsigned long long* arg_values
,
117 const scoped_refptr
<ConvertableToTraceFormat
>* convertable_values
,
118 unsigned char flags
);
122 void UpdateDuration(const TimeTicks
& now
, const TimeTicks
& thread_now
);
124 // Serialize event data to JSON
125 void AppendAsJSON(std::string
* out
) const;
126 void AppendPrettyPrinted(std::ostringstream
* out
) const;
128 static void AppendValueAsJSON(unsigned char type
,
132 TimeTicks
timestamp() const { return timestamp_
; }
133 TimeTicks
thread_timestamp() const { return thread_timestamp_
; }
134 char phase() const { return phase_
; }
135 int thread_id() const { return thread_id_
; }
136 TimeDelta
duration() const { return duration_
; }
137 TimeDelta
thread_duration() const { return thread_duration_
; }
138 unsigned long long id() const { return id_
; }
139 unsigned char flags() const { return flags_
; }
141 // Exposed for unittesting:
143 const base::RefCountedString
* parameter_copy_storage() const {
144 return parameter_copy_storage_
.get();
147 const unsigned char* category_group_enabled() const {
148 return category_group_enabled_
;
151 const char* name() const { return name_
; }
153 #if defined(OS_ANDROID)
158 // Note: these are ordered by size (largest first) for optimal packing.
159 TimeTicks timestamp_
;
160 TimeTicks thread_timestamp_
;
162 TimeDelta thread_duration_
;
163 // id_ can be used to store phase-specific data.
164 unsigned long long id_
;
165 TraceValue arg_values_
[kTraceMaxNumArgs
];
166 const char* arg_names_
[kTraceMaxNumArgs
];
167 scoped_refptr
<ConvertableToTraceFormat
> convertable_values_
[kTraceMaxNumArgs
];
168 const unsigned char* category_group_enabled_
;
170 scoped_refptr
<base::RefCountedString
> parameter_copy_storage_
;
173 unsigned char flags_
;
174 unsigned char arg_types_
[kTraceMaxNumArgs
];
176 DISALLOW_COPY_AND_ASSIGN(TraceEvent
);
179 // TraceBufferChunk is the basic unit of TraceBuffer.
180 class BASE_EXPORT TraceBufferChunk
{
182 explicit TraceBufferChunk(uint32 seq
)
187 void Reset(uint32 new_seq
);
188 TraceEvent
* AddTraceEvent(size_t* event_index
);
189 bool IsFull() const { return next_free_
== kTraceBufferChunkSize
; }
191 uint32
seq() const { return seq_
; }
192 size_t capacity() const { return kTraceBufferChunkSize
; }
193 size_t size() const { return next_free_
; }
195 TraceEvent
* GetEventAt(size_t index
) {
196 DCHECK(index
< size());
197 return &chunk_
[index
];
199 const TraceEvent
* GetEventAt(size_t index
) const {
200 DCHECK(index
< size());
201 return &chunk_
[index
];
204 scoped_ptr
<TraceBufferChunk
> Clone() const;
206 static const size_t kTraceBufferChunkSize
= 64;
210 TraceEvent chunk_
[kTraceBufferChunkSize
];
214 // TraceBuffer holds the events as they are collected.
215 class BASE_EXPORT TraceBuffer
{
217 virtual ~TraceBuffer() {}
219 virtual scoped_ptr
<TraceBufferChunk
> GetChunk(size_t *index
) = 0;
220 virtual void ReturnChunk(size_t index
,
221 scoped_ptr
<TraceBufferChunk
> chunk
) = 0;
223 virtual bool IsFull() const = 0;
224 virtual size_t Size() const = 0;
225 virtual size_t Capacity() const = 0;
226 virtual TraceEvent
* GetEventByHandle(TraceEventHandle handle
) = 0;
228 // For iteration. Each TraceBuffer can only be iterated once.
229 virtual const TraceBufferChunk
* NextChunk() = 0;
231 virtual scoped_ptr
<TraceBuffer
> CloneForIteration() const = 0;
234 // TraceResultBuffer collects and converts trace fragments returned by TraceLog
236 class BASE_EXPORT TraceResultBuffer
{
238 typedef base::Callback
<void(const std::string
&)> OutputCallback
;
240 // If you don't need to stream JSON chunks out efficiently, and just want to
241 // get a complete JSON string after calling Finish, use this struct to collect
242 // JSON trace output.
243 struct BASE_EXPORT SimpleOutput
{
244 OutputCallback
GetCallback();
245 void Append(const std::string
& json_string
);
247 // Do what you want with the json_output_ string after calling
248 // TraceResultBuffer::Finish.
249 std::string json_output
;
253 ~TraceResultBuffer();
255 // Set callback. The callback will be called during Start with the initial
256 // JSON output and during AddFragment and Finish with following JSON output
257 // chunks. The callback target must live past the last calls to
258 // TraceResultBuffer::Start/AddFragment/Finish.
259 void SetOutputCallback(const OutputCallback
& json_chunk_callback
);
261 // Start JSON output. This resets all internal state, so you can reuse
262 // the TraceResultBuffer by calling Start.
265 // Call AddFragment 0 or more times to add trace fragments from TraceLog.
266 void AddFragment(const std::string
& trace_fragment
);
268 // When all fragments have been added, call Finish to complete the JSON
273 OutputCallback output_callback_
;
277 class BASE_EXPORT CategoryFilter
{
279 typedef std::vector
<std::string
> StringList
;
281 // The default category filter, used when none is provided.
282 // Allows all categories through, except if they end in the suffix 'Debug' or
284 static const char kDefaultCategoryFilterString
[];
286 // |filter_string| is a comma-delimited list of category wildcards.
287 // A category can have an optional '-' prefix to make it an excluded category.
288 // All the same rules apply above, so for example, having both included and
289 // excluded categories in the same list would not be supported.
291 // Example: CategoryFilter"test_MyTest*");
292 // Example: CategoryFilter("test_MyTest*,test_OtherStuff");
293 // Example: CategoryFilter("-excluded_category1,-excluded_category2");
294 // Example: CategoryFilter("-*,webkit"); would disable everything but webkit.
295 // Example: CategoryFilter("-webkit"); would enable everything but webkit.
297 // Category filters can also be used to configure synthetic delays.
299 // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16)"); would make swap
300 // buffers always take at least 16 ms.
301 // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;oneshot)"); would
302 // make swap buffers take at least 16 ms the first time it is
304 // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;alternating)");
305 // would make swap buffers take at least 16 ms every other time it
307 explicit CategoryFilter(const std::string
& filter_string
);
311 CategoryFilter(const CategoryFilter
& cf
);
315 CategoryFilter
& operator=(const CategoryFilter
& rhs
);
317 // Writes the string representation of the CategoryFilter. This is a comma
318 // separated string, similar in nature to the one used to determine
319 // enabled/disabled category patterns, except here there is an arbitrary
320 // order, included categories go first, then excluded categories. Excluded
321 // categories are distinguished from included categories by the prefix '-'.
322 std::string
ToString() const;
324 // Returns true if at least one category in the list is enabled by this
326 bool IsCategoryGroupEnabled(const char* category_group
) const;
328 // Return a list of the synthetic delays specified in this category filter.
329 const StringList
& GetSyntheticDelayValues() const;
331 // Merges nested_filter with the current CategoryFilter
332 void Merge(const CategoryFilter
& nested_filter
);
334 // Clears both included/excluded pattern lists. This would be equivalent to
335 // creating a CategoryFilter with an empty string, through the constructor.
336 // i.e: CategoryFilter().
338 // When using an empty filter, all categories are considered included as we
339 // are not excluding anything.
343 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture
, CategoryFilter
);
345 // Returns true if category is enable according to this filter.
346 bool IsCategoryEnabled(const char* category_name
) const;
348 static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
349 const std::string
& str
);
351 void Initialize(const std::string
& filter_string
);
352 void WriteString(const StringList
& values
,
354 bool included
) const;
355 void WriteString(const StringList
& delays
, std::string
* out
) const;
356 bool HasIncludedPatterns() const;
358 StringList included_
;
359 StringList disabled_
;
360 StringList excluded_
;
364 class TraceSamplingThread
;
// Options determines how the trace buffer stores data.
// NOTE(review): enumerator names for the first three values were
// reconstructed from their doc comments and from uses elsewhere in this
// header (RECORD_UNTIL_FULL default, ECHO_TO_CONSOLE) — verify against
// upstream before relying on their ordinal values.
enum TraceRecordMode {
  // Record until the trace buffer is full.
  RECORD_UNTIL_FULL,

  // Record until the user ends the trace. The trace buffer is a fixed size
  // and we use it as a ring buffer during recording.
  RECORD_CONTINUOUSLY,

  // Echo to console. Events are discarded.
  ECHO_TO_CONSOLE,

  // Record until the trace buffer is full, but with a huge buffer size.
  RECORD_AS_MUCH_AS_POSSIBLE
};
382 struct BASE_EXPORT TraceOptions
{
384 : record_mode(RECORD_UNTIL_FULL
),
385 enable_sampling(false),
386 enable_systrace(false) {}
388 explicit TraceOptions(TraceRecordMode record_mode
)
389 : record_mode(record_mode
),
390 enable_sampling(false),
391 enable_systrace(false) {}
393 // |options_string| is a comma-delimited list of trace options.
394 // Possible options are: "record-until-full", "record-continuously",
395 // "trace-to-console", "enable-sampling" and "enable-systrace".
396 // The first 3 options are trace recoding modes and hence
397 // mutually exclusive. If more than one trace recording modes appear in the
398 // options_string, the last one takes precedence. If none of the trace
399 // recording mode is specified, recording mode is RECORD_UNTIL_FULL.
401 // The trace option will first be reset to the default option
402 // (record_mode set to RECORD_UNTIL_FULL, enable_sampling and enable_systrace
403 // set to false) before options parsed from |options_string| are applied on
405 // If |options_string| is invalid, the final state of trace_options is
408 // Example: trace_options.SetFromString("record-until-full")
409 // Example: trace_options.SetFromString(
410 // "record-continuously, enable-sampling")
411 // Example: trace_options.SetFromString("record-until-full, trace-to-console")
412 // will set ECHO_TO_CONSOLE as the recording mode.
414 // Returns true on success.
415 bool SetFromString(const std::string
& options_string
);
417 std::string
ToString() const;
419 TraceRecordMode record_mode
;
420 bool enable_sampling
;
421 bool enable_systrace
;
424 struct BASE_EXPORT TraceLogStatus
{
427 size_t event_capacity
;
431 class BASE_EXPORT TraceLog
{
439 // The pointer returned from GetCategoryGroupEnabledInternal() points to a
440 // value with zero or more of the following bits. Used in this class only.
441 // The TRACE_EVENT macros should only use the value as a bool.
442 // These values must be in sync with macro values in TraceEvent.h in Blink.
443 enum CategoryGroupEnabledFlags
{
444 // Category group enabled for the recording mode.
445 ENABLED_FOR_RECORDING
= 1 << 0,
446 // Category group enabled for the monitoring mode.
447 ENABLED_FOR_MONITORING
= 1 << 1,
448 // Category group enabled by SetEventCallbackEnabled().
449 ENABLED_FOR_EVENT_CALLBACK
= 1 << 2,
450 // Category group enabled to export events to ETW.
451 ENABLED_FOR_ETW_EXPORT
= 1 << 3
454 static TraceLog
* GetInstance();
456 // Get set of known category groups. This can change as new code paths are
457 // reached. The known category groups are inserted into |category_groups|.
458 void GetKnownCategoryGroups(std::vector
<std::string
>* category_groups
);
460 // Retrieves a copy (for thread-safety) of the current CategoryFilter.
461 CategoryFilter
GetCurrentCategoryFilter();
463 // Retrieves a copy (for thread-safety) of the current TraceOptions.
464 TraceOptions
GetCurrentTraceOptions() const;
466 // Enables normal tracing (recording trace events in the trace buffer).
467 // See CategoryFilter comments for details on how to control what categories
468 // will be traced. If tracing has already been enabled, |category_filter| will
469 // be merged into the current category filter.
470 void SetEnabled(const CategoryFilter
& category_filter
,
471 Mode mode
, const TraceOptions
& options
);
473 // Disables normal tracing for all categories.
476 bool IsEnabled() { return mode_
!= DISABLED
; }
478 // The number of times we have begun recording traces. If tracing is off,
479 // returns -1. If tracing is on, then it returns the number of times we have
480 // recorded a trace. By watching for this number to increment, you can
481 // passively discover when a new trace has begun. This is then used to
482 // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
483 int GetNumTracesRecorded();
485 #if defined(OS_ANDROID)
488 void AddClockSyncMetadataEvent();
491 // Enabled state listeners give a callback when tracing is enabled or
492 // disabled. This can be used to tie into other library's tracing systems
494 class BASE_EXPORT EnabledStateObserver
{
496 // Called just after the tracing system becomes enabled, outside of the
497 // |lock_|. TraceLog::IsEnabled() is true at this point.
498 virtual void OnTraceLogEnabled() = 0;
500 // Called just after the tracing system disables, outside of the |lock_|.
501 // TraceLog::IsEnabled() is false at this point.
502 virtual void OnTraceLogDisabled() = 0;
504 void AddEnabledStateObserver(EnabledStateObserver
* listener
);
505 void RemoveEnabledStateObserver(EnabledStateObserver
* listener
);
506 bool HasEnabledStateObserver(EnabledStateObserver
* listener
) const;
508 TraceLogStatus
GetStatus() const;
509 bool BufferIsFull() const;
511 // Not using base::Callback because of its limited by 7 parameters.
512 // Also, using primitive type allows directly passing callback from WebCore.
513 // WARNING: It is possible for the previously set callback to be called
514 // after a call to SetEventCallbackEnabled() that replaces or a call to
515 // SetEventCallbackDisabled() that disables the callback.
516 // This callback may be invoked on any thread.
517 // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
518 // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
520 typedef void (*EventCallback
)(TimeTicks timestamp
,
522 const unsigned char* category_group_enabled
,
524 unsigned long long id
,
526 const char* const arg_names
[],
527 const unsigned char arg_types
[],
528 const unsigned long long arg_values
[],
529 unsigned char flags
);
531 // Enable tracing for EventCallback.
532 void SetEventCallbackEnabled(const CategoryFilter
& category_filter
,
534 void SetEventCallbackDisabled();
536 // Flush all collected events to the given output callback. The callback will
537 // be called one or more times either synchronously or asynchronously from
538 // the current thread with IPC-bite-size chunks. The string format is
539 // undefined. Use TraceResultBuffer to convert one or more trace strings to
540 // JSON. The callback can be null if the caller doesn't want any data.
541 // Due to the implementation of thread-local buffers, flush can't be
542 // done when tracing is enabled. If called when tracing is enabled, the
543 // callback will be called directly with (empty_string, false) to indicate
544 // the end of this unsuccessful flush. Flush does the serialization
545 // on the same thread if the caller doesn't set use_worker_thread explicitly.
546 typedef base::Callback
<void(const scoped_refptr
<base::RefCountedString
>&,
547 bool has_more_events
)> OutputCallback
;
548 void Flush(const OutputCallback
& cb
, bool use_worker_thread
= false);
549 void FlushButLeaveBufferIntact(const OutputCallback
& flush_output_callback
);
551 // Called by TRACE_EVENT* macros, don't call this directly.
552 // The name parameter is a category group for example:
553 // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
554 static const unsigned char* GetCategoryGroupEnabled(const char* name
);
555 static const char* GetCategoryGroupName(
556 const unsigned char* category_group_enabled
);
558 // Called by TRACE_EVENT* macros, don't call this directly.
559 // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
560 // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
561 TraceEventHandle
AddTraceEvent(
563 const unsigned char* category_group_enabled
,
565 unsigned long long id
,
567 const char** arg_names
,
568 const unsigned char* arg_types
,
569 const unsigned long long* arg_values
,
570 const scoped_refptr
<ConvertableToTraceFormat
>* convertable_values
,
571 unsigned char flags
);
572 TraceEventHandle
AddTraceEventWithThreadIdAndTimestamp(
574 const unsigned char* category_group_enabled
,
576 unsigned long long id
,
578 const TimeTicks
& timestamp
,
580 const char** arg_names
,
581 const unsigned char* arg_types
,
582 const unsigned long long* arg_values
,
583 const scoped_refptr
<ConvertableToTraceFormat
>* convertable_values
,
584 unsigned char flags
);
585 static void AddTraceEventEtw(char phase
,
586 const char* category_group
,
589 static void AddTraceEventEtw(char phase
,
590 const char* category_group
,
592 const std::string
& extra
);
594 void UpdateTraceEventDuration(const unsigned char* category_group_enabled
,
596 TraceEventHandle handle
);
598 // For every matching event, the callback will be called.
599 typedef base::Callback
<void()> WatchEventCallback
;
600 void SetWatchEvent(const std::string
& category_name
,
601 const std::string
& event_name
,
602 const WatchEventCallback
& callback
);
603 // Cancel the watch event. If tracing is enabled, this may race with the
604 // watch event notification firing.
605 void CancelWatchEvent();
607 int process_id() const { return process_id_
; }
609 uint64
MangleEventId(uint64 id
);
611 // Exposed for unittesting:
613 void WaitSamplingEventForTesting();
615 // Allows deleting our singleton instance.
616 static void DeleteForTesting();
618 // Allow tests to inspect TraceEvents.
619 TraceEvent
* GetEventByHandle(TraceEventHandle handle
);
621 void SetProcessID(int process_id
);
623 // Process sort indices, if set, override the order of a process will appear
624 // relative to other processes in the trace viewer. Processes are sorted first
625 // on their sort index, ascending, then by their name, and then tid.
626 void SetProcessSortIndex(int sort_index
);
628 // Sets the name of the process.
629 void SetProcessName(const std::string
& process_name
);
631 // Processes can have labels in addition to their names. Use labels, for
632 // instance, to list out the web page titles that a process is handling.
633 void UpdateProcessLabel(int label_id
, const std::string
& current_label
);
634 void RemoveProcessLabel(int label_id
);
636 // Thread sort indices, if set, override the order of a thread will appear
637 // within its process in the trace viewer. Threads are sorted first on their
638 // sort index, ascending, then by their name, and then tid.
639 void SetThreadSortIndex(PlatformThreadId
, int sort_index
);
641 // Allow setting an offset between the current TimeTicks time and the time
642 // that should be reported.
643 void SetTimeOffset(TimeDelta offset
);
645 size_t GetObserverCountForTest() const;
647 // Call this method if the current thread may block the message loop to
648 // prevent the thread from using the thread-local buffer because the thread
649 // may not handle the flush request in time causing lost of unflushed events.
650 void SetCurrentThreadBlocksMessageLoop();
653 typedef unsigned int InternalTraceOptions
;
655 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture
,
656 TraceBufferRingBufferGetReturnChunk
);
657 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture
,
658 TraceBufferRingBufferHalfIteration
);
659 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture
,
660 TraceBufferRingBufferFullIteration
);
661 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture
,
662 TraceBufferVectorReportFull
);
663 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture
,
664 ConvertTraceOptionsToInternalOptions
);
665 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture
,
666 TraceRecordAsMuchAsPossibleMode
);
668 // This allows constructor and destructor to be private and usable only
669 // by the Singleton class.
670 friend struct DefaultSingletonTraits
<TraceLog
>;
672 // Enable/disable each category group based on the current mode_,
673 // category_filter_, event_callback_ and event_callback_category_filter_.
674 // Enable the category group in the enabled mode if category_filter_ matches
675 // the category group, or event_callback_ is not null and
676 // event_callback_category_filter_ matches the category group.
677 void UpdateCategoryGroupEnabledFlags();
678 void UpdateCategoryGroupEnabledFlag(size_t category_index
);
680 // Configure synthetic delays based on the values set in the current
682 void UpdateSyntheticDelaysFromCategoryFilter();
684 InternalTraceOptions
GetInternalOptionsFromTraceOptions(
685 const TraceOptions
& options
);
687 class ThreadLocalEventBuffer
;
688 class OptionalAutoLock
;
692 const unsigned char* GetCategoryGroupEnabledInternal(const char* name
);
693 void AddMetadataEventsWhileLocked();
695 InternalTraceOptions
trace_options() const {
696 return static_cast<InternalTraceOptions
>(
697 subtle::NoBarrier_Load(&trace_options_
));
700 TraceBuffer
* trace_buffer() const { return logged_events_
.get(); }
701 TraceBuffer
* CreateTraceBuffer();
702 TraceBuffer
* CreateTraceBufferVectorOfSize(size_t max_chunks
);
704 std::string
EventToConsoleMessage(unsigned char phase
,
705 const TimeTicks
& timestamp
,
706 TraceEvent
* trace_event
);
708 TraceEvent
* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle
* handle
,
709 bool check_buffer_is_full
);
710 void CheckIfBufferIsFullWhileLocked();
711 void SetDisabledWhileLocked();
713 TraceEvent
* GetEventByHandleInternal(TraceEventHandle handle
,
714 OptionalAutoLock
* lock
);
716 // |generation| is used in the following callbacks to check if the callback
717 // is called for the flush of the current |logged_events_|.
718 void FlushCurrentThread(int generation
);
719 // Usually it runs on a different thread.
720 static void ConvertTraceEventsToTraceFormat(
721 scoped_ptr
<TraceBuffer
> logged_events
,
722 const TraceLog::OutputCallback
& flush_output_callback
);
723 void FinishFlush(int generation
);
724 void OnFlushTimeout(int generation
);
726 int generation() const {
727 return static_cast<int>(subtle::NoBarrier_Load(&generation_
));
729 bool CheckGeneration(int generation
) const {
730 return generation
== this->generation();
732 void UseNextTraceBuffer();
734 TimeTicks
OffsetNow() const {
735 return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime());
737 TimeTicks
OffsetTimestamp(const TimeTicks
& timestamp
) const {
738 return timestamp
- time_offset_
;
741 // Internal representation of trace options since we store the currently used
742 // trace option as an AtomicWord.
743 static const InternalTraceOptions kInternalNone
;
744 static const InternalTraceOptions kInternalRecordUntilFull
;
745 static const InternalTraceOptions kInternalRecordContinuously
;
746 static const InternalTraceOptions kInternalEchoToConsole
;
747 static const InternalTraceOptions kInternalEnableSampling
;
748 static const InternalTraceOptions kInternalRecordAsMuchAsPossible
;
750 // This lock protects TraceLog member accesses (except for members protected
751 // by thread_info_lock_) from arbitrary threads.
753 // This lock protects accesses to thread_names_, thread_event_start_times_
754 // and thread_colors_.
755 Lock thread_info_lock_
;
757 int num_traces_recorded_
;
758 scoped_ptr
<TraceBuffer
> logged_events_
;
759 subtle::AtomicWord
/* EventCallback */ event_callback_
;
760 bool dispatching_to_observer_list_
;
761 std::vector
<EnabledStateObserver
*> enabled_state_observer_list_
;
763 std::string process_name_
;
764 base::hash_map
<int, std::string
> process_labels_
;
765 int process_sort_index_
;
766 base::hash_map
<int, int> thread_sort_indices_
;
767 base::hash_map
<int, std::string
> thread_names_
;
769 // The following two maps are used only when ECHO_TO_CONSOLE.
770 base::hash_map
<int, std::stack
<TimeTicks
> > thread_event_start_times_
;
771 base::hash_map
<std::string
, int> thread_colors_
;
773 TimeTicks buffer_limit_reached_timestamp_
;
775 // XORed with TraceID to make it unlikely to collide with other processes.
776 unsigned long long process_id_hash_
;
780 TimeDelta time_offset_
;
782 // Allow tests to wake up when certain events occur.
783 WatchEventCallback watch_event_callback_
;
784 subtle::AtomicWord
/* const unsigned char* */ watch_category_
;
785 std::string watch_event_name_
;
787 subtle::AtomicWord
/* Options */ trace_options_
;
789 // Sampling thread handles.
790 scoped_ptr
<TraceSamplingThread
> sampling_thread_
;
791 PlatformThreadHandle sampling_thread_handle_
;
793 CategoryFilter category_filter_
;
794 CategoryFilter event_callback_category_filter_
;
796 ThreadLocalPointer
<ThreadLocalEventBuffer
> thread_local_event_buffer_
;
797 ThreadLocalBoolean thread_blocks_message_loop_
;
798 ThreadLocalBoolean thread_is_in_trace_event_
;
800 // Contains the message loops of threads that have had at least one event
801 // added into the local event buffer. Not using MessageLoopProxy because we
802 // need to know the life time of the message loops.
803 hash_set
<MessageLoop
*> thread_message_loops_
;
805 // For events which can't be added into the thread local buffer, e.g. events
806 // from threads without a message loop.
807 scoped_ptr
<TraceBufferChunk
> thread_shared_chunk_
;
808 size_t thread_shared_chunk_index_
;
810 // Set when asynchronous Flush is in progress.
811 OutputCallback flush_output_callback_
;
812 scoped_refptr
<MessageLoopProxy
> flush_message_loop_proxy_
;
813 subtle::AtomicWord generation_
;
814 bool use_worker_thread_
;
816 DISALLOW_COPY_AND_ASSIGN(TraceLog
);
819 } // namespace trace_event
822 #endif // BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_