// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_
#define BASE_DEBUG_TRACE_EVENT_IMPL_H_

#include <stack>
#include <string>
#include <vector>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/scoped_vector.h"
#include "base/observer_list.h"
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/threading/thread_local.h"
#include "base/timer/timer.h"

// Older style trace macros with explicit id and extra data.
// Only these macros result in publishing data to ETW as currently implemented.
#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_BEGIN, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_END_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_END, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_INSTANT, \
        name, reinterpret_cast<const void*>(id), extra)
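
// A minimal usage sketch for the ETW macros above (illustrative only; assumes
// a Windows build where the TRACE_EVENT_PHASE_* constants are visible via
// trace_event.h):
//
//   void DoWork() {
//     TRACE_EVENT_BEGIN_ETW("MyComponent::DoWork", 0x1, "starting");
//     // ... work ...
//     TRACE_EVENT_END_ETW("MyComponent::DoWork", 0x1, "done");
//   }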

template <typename Type>
struct DefaultSingletonTraits;

#if defined(COMPILER_GCC)
namespace BASE_HASH_NAMESPACE {
template <>
struct hash<base::MessageLoop*> {
  std::size_t operator()(base::MessageLoop* value) const {
    return reinterpret_cast<std::size_t>(value);
  }
};
}  // BASE_HASH_NAMESPACE
#endif

namespace base {

class WaitableEvent;
class MessageLoop;

namespace debug {

// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
// class must implement this interface.
class BASE_EXPORT ConvertableToTraceFormat
    : public RefCounted<ConvertableToTraceFormat> {
 public:
  // Append the class info to the provided |out| string. The appended
  // data must be a valid JSON object. Strings must be properly quoted and
  // escaped. There is no processing applied to the content after it is
  // appended.
  virtual void AppendAsTraceFormat(std::string* out) const = 0;

  std::string ToString() const {
    std::string result;
    AppendAsTraceFormat(&result);
    return result;
  }

 protected:
  virtual ~ConvertableToTraceFormat() {}

 private:
  friend class RefCounted<ConvertableToTraceFormat>;
};
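
// A minimal implementation sketch (not part of this header; |TracedCounter|
// and its field are hypothetical):
//
//   class TracedCounter : public base::debug::ConvertableToTraceFormat {
//    public:
//     explicit TracedCounter(int value) : value_(value) {}
//     virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE {
//       // Must append a valid JSON object.
//       out->append(base::StringPrintf("{\"value\":%d}", value_));
//     }
//    private:
//     virtual ~TracedCounter() {}
//     int value_;
//   };
//
// An event argument of type TRACE_VALUE_TYPE_CONVERTABLE would then carry a
// scoped_refptr<TracedCounter>.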

struct TraceEventHandle {
  uint32 chunk_seq;
  uint16 chunk_index;
  uint16 event_index;
};

const int kTraceMaxNumArgs = 2;

class BASE_EXPORT TraceEvent {
 public:
  union TraceValue {
    bool as_bool;
    unsigned long long as_uint;
    long long as_int;
    double as_double;
    const void* as_pointer;
    const char* as_string;
  };

  TraceEvent();
  ~TraceEvent();

  // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
  // Use an explicit copy method to avoid accidental misuse of copying.
  void CopyFrom(const TraceEvent& other);

  void Initialize(
      int thread_id,
      TimeTicks timestamp,
      TimeTicks thread_timestamp,
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);

  void Reset();

  void UpdateDuration(const TimeTicks& now, const TimeTicks& thread_now);

  // Serialize event data to JSON.
  void AppendAsJSON(std::string* out) const;
  void AppendPrettyPrinted(std::ostringstream* out) const;

  static void AppendValueAsJSON(unsigned char type,
                                TraceValue value,
                                std::string* out);

  TimeTicks timestamp() const { return timestamp_; }
  TimeTicks thread_timestamp() const { return thread_timestamp_; }
  char phase() const { return phase_; }
  int thread_id() const { return thread_id_; }
  TimeDelta duration() const { return duration_; }
  TimeDelta thread_duration() const { return thread_duration_; }
  unsigned long long id() const { return id_; }
  unsigned char flags() const { return flags_; }

  // Exposed for unittesting:

  const base::RefCountedString* parameter_copy_storage() const {
    return parameter_copy_storage_.get();
  }

  const unsigned char* category_group_enabled() const {
    return category_group_enabled_;
  }

  const char* name() const { return name_; }

#if defined(OS_ANDROID)
  void SendToATrace();
#endif

 private:
  // Note: these are ordered by size (largest first) for optimal packing.
  TimeTicks timestamp_;
  TimeTicks thread_timestamp_;
  TimeDelta duration_;
  TimeDelta thread_duration_;
  // id_ can be used to store phase-specific data.
  unsigned long long id_;
  TraceValue arg_values_[kTraceMaxNumArgs];
  const char* arg_names_[kTraceMaxNumArgs];
  scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
  const unsigned char* category_group_enabled_;
  const char* name_;
  scoped_refptr<base::RefCountedString> parameter_copy_storage_;
  int thread_id_;
  char phase_;
  unsigned char flags_;
  unsigned char arg_types_[kTraceMaxNumArgs];

  DISALLOW_COPY_AND_ASSIGN(TraceEvent);
};

// TraceBufferChunk is the basic unit of TraceBuffer.
class BASE_EXPORT TraceBufferChunk {
 public:
  TraceBufferChunk(uint32 seq)
      : next_free_(0),
        seq_(seq) {
  }

  void Reset(uint32 new_seq);
  TraceEvent* AddTraceEvent(size_t* event_index);
  bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }

  uint32 seq() const { return seq_; }
  size_t capacity() const { return kTraceBufferChunkSize; }
  size_t size() const { return next_free_; }

  TraceEvent* GetEventAt(size_t index) {
    DCHECK(index < size());
    return &chunk_[index];
  }

  const TraceEvent* GetEventAt(size_t index) const {
    DCHECK(index < size());
    return &chunk_[index];
  }

  scoped_ptr<TraceBufferChunk> Clone() const;

  static const size_t kTraceBufferChunkSize = 64;

 private:
  size_t next_free_;
  TraceEvent chunk_[kTraceBufferChunkSize];
  uint32 seq_;
};

// TraceBuffer holds the events as they are collected.
class BASE_EXPORT TraceBuffer {
 public:
  virtual ~TraceBuffer() {}

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) = 0;

  virtual bool IsFull() const = 0;
  virtual size_t Size() const = 0;
  virtual size_t Capacity() const = 0;
  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;

  // For iteration. Each TraceBuffer can only be iterated once.
  virtual const TraceBufferChunk* NextChunk() = 0;

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
};
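
// A minimal iteration sketch (illustrative only). Since each buffer may only
// be iterated once, iterate over a clone if the original must stay usable:
//
//   scoped_ptr<TraceBuffer> copy = buffer->CloneForIteration();
//   while (const TraceBufferChunk* chunk = copy->NextChunk()) {
//     for (size_t i = 0; i < chunk->size(); ++i) {
//       const TraceEvent* event = chunk->GetEventAt(i);
//       // ... inspect |event| ...
//     }
//   }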

// TraceResultBuffer collects and converts trace fragments returned by TraceLog
// to JSON output.
class BASE_EXPORT TraceResultBuffer {
 public:
  typedef base::Callback<void(const std::string&)> OutputCallback;

  // If you don't need to stream JSON chunks out efficiently, and just want to
  // get a complete JSON string after calling Finish, use this struct to
  // collect JSON trace output.
  struct BASE_EXPORT SimpleOutput {
    OutputCallback GetCallback();
    void Append(const std::string& json_string);

    // Do what you want with the json_output string after calling
    // TraceResultBuffer::Finish.
    std::string json_output;
  };

  TraceResultBuffer();
  ~TraceResultBuffer();

  // Set the callback. The callback will be called during Start with the
  // initial JSON output and during AddFragment and Finish with following JSON
  // output chunks. The callback target must live past the last calls to
  // TraceResultBuffer::Start/AddFragment/Finish.
  void SetOutputCallback(const OutputCallback& json_chunk_callback);

  // Start JSON output. This resets all internal state, so you can reuse
  // the TraceResultBuffer by calling Start.
  void Start();

  // Call AddFragment 0 or more times to add trace fragments from TraceLog.
  void AddFragment(const std::string& trace_fragment);

  // When all fragments have been added, call Finish to complete the JSON
  // formatted output.
  void Finish();

 private:
  OutputCallback output_callback_;
  bool append_comma_;
};
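
// A minimal usage sketch (illustrative only), collecting the whole trace into
// one JSON string via SimpleOutput:
//
//   TraceResultBuffer::SimpleOutput output;
//   TraceResultBuffer buffer;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();
//   // Call AddFragment for each fragment produced by TraceLog::Flush.
//   buffer.AddFragment(fragment);
//   buffer.Finish();
//   // output.json_output now holds the complete JSON array of events.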

class BASE_EXPORT CategoryFilter {
 public:
  typedef std::vector<std::string> StringList;

  // The default category filter, used when none is provided.
  // Allows all categories through, except if they end in the suffix 'Debug' or
  // 'Test'.
  static const char* kDefaultCategoryFilterString;

  // |filter_string| is a comma-delimited list of category wildcards.
  // A category can have an optional '-' prefix to make it an excluded
  // category. All the same rules as above apply, so for example, having both
  // included and excluded categories in the same list is not supported.
  //
  // Example: CategoryFilter("test_MyTest*");
  // Example: CategoryFilter("test_MyTest*,test_OtherStuff");
  // Example: CategoryFilter("-excluded_category1,-excluded_category2");
  // Example: CategoryFilter("-*,webkit"); would disable everything but webkit.
  // Example: CategoryFilter("-webkit"); would enable everything but webkit.
  //
  // Category filters can also be used to configure synthetic delays.
  //
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16)"); would make swap
  //          buffers always take at least 16 ms.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;oneshot)"); would
  //          make swap buffers take at least 16 ms the first time it is
  //          called.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;alternating)");
  //          would make swap buffers take at least 16 ms every other time it
  //          is called.
  explicit CategoryFilter(const std::string& filter_string);

  CategoryFilter();

  CategoryFilter(const CategoryFilter& cf);

  ~CategoryFilter();

  CategoryFilter& operator=(const CategoryFilter& rhs);

  // Writes the string representation of the CategoryFilter. This is a comma
  // separated string, similar in nature to the one used to determine
  // enabled/disabled category patterns, except here the order is: included
  // categories go first, then excluded categories. Excluded categories are
  // distinguished from included categories by the prefix '-'.
  std::string ToString() const;

  // Determines whether the category group would be enabled or
  // disabled by this category filter.
  bool IsCategoryGroupEnabled(const char* category_group) const;

  // Returns a list of the synthetic delays specified in this category filter.
  const StringList& GetSyntheticDelayValues() const;

  // Merges nested_filter with the current CategoryFilter.
  void Merge(const CategoryFilter& nested_filter);

  // Clears both included/excluded pattern lists. This would be equivalent to
  // creating a CategoryFilter with an empty string, through the constructor,
  // i.e.: CategoryFilter().
  //
  // When using an empty filter, all categories are considered included as we
  // are not excluding anything.
  void Clear();

 private:
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, CategoryFilter);

  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
      const std::string& str);

  void Initialize(const std::string& filter_string);
  void WriteString(const StringList& values,
                   std::string* out,
                   bool included) const;
  void WriteString(const StringList& delays, std::string* out) const;
  bool HasIncludedPatterns() const;

  bool DoesCategoryGroupContainCategory(const char* category_group,
                                        const char* category) const;

  StringList included_;
  StringList disabled_;
  StringList excluded_;
  StringList delays_;
};
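
// A minimal usage sketch (illustrative only):
//
//   CategoryFilter filter("-excluded_category1,-excluded_category2");
//   if (filter.IsCategoryGroupEnabled("my_category")) {
//     // Enabled: only the two excluded categories are filtered out.
//   }
//   std::string serialized = filter.ToString();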

class TraceSamplingThread;

// Options determines how the trace buffer stores data.
enum TraceRecordMode {
  // Record until the trace buffer is full.
  RECORD_UNTIL_FULL,

  // Record until the user ends the trace. The trace buffer is a fixed size
  // and we use it as a ring buffer during recording.
  RECORD_CONTINUOUSLY,

  // Echo to console. Events are discarded.
  ECHO_TO_CONSOLE,

  // Record until the trace buffer is full, but with a huge buffer size.
  RECORD_AS_MUCH_AS_POSSIBLE
};

struct BASE_EXPORT TraceOptions {
  TraceOptions()
      : record_mode(RECORD_UNTIL_FULL),
        enable_sampling(false),
        enable_systrace(false) {}

  TraceOptions(TraceRecordMode record_mode)
      : record_mode(record_mode),
        enable_sampling(false),
        enable_systrace(false) {}

  // |options_string| is a comma-delimited list of trace options.
  // Possible options are: "record-until-full", "record-continuously",
  // "trace-to-console", "enable-sampling" and "enable-systrace".
  // The first 3 options are trace recording modes and hence
  // mutually exclusive. If more than one trace recording mode appears in the
  // options_string, the last one takes precedence. If no trace recording mode
  // is specified, the recording mode is RECORD_UNTIL_FULL.
  //
  // The trace options will first be reset to the defaults (record_mode set to
  // RECORD_UNTIL_FULL, enable_sampling and enable_systrace set to false)
  // before options parsed from |options_string| are applied to them.
  // If |options_string| is invalid, the final state of trace_options is
  // undefined.
  //
  // Example: trace_options.SetFromString("record-until-full")
  // Example: trace_options.SetFromString(
  //              "record-continuously, enable-sampling")
  // Example: trace_options.SetFromString("record-until-full, trace-to-console")
  //          will set ECHO_TO_CONSOLE as the recording mode.
  //
  // Returns true on success.
  bool SetFromString(const std::string& options_string);

  std::string ToString() const;

  TraceRecordMode record_mode;
  bool enable_sampling;
  bool enable_systrace;
};
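
// A minimal usage sketch (illustrative only):
//
//   TraceOptions options;
//   bool ok = options.SetFromString("record-continuously,enable-sampling");
//   DCHECK(ok);
//   // options.record_mode == RECORD_CONTINUOUSLY,
//   // options.enable_sampling == true, options.enable_systrace == false.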

class BASE_EXPORT TraceLog {
 public:
  enum Mode {
    DISABLED = 0,
    RECORDING_MODE,
    MONITORING_MODE,
  };

  // The pointer returned from GetCategoryGroupEnabledInternal() points to a
  // value with zero or more of the following bits. Used in this class only.
  // The TRACE_EVENT macros should only use the value as a bool.
  // These values must be in sync with macro values in TraceEvent.h in Blink.
  enum CategoryGroupEnabledFlags {
    // Category group enabled for the recording mode.
    ENABLED_FOR_RECORDING = 1 << 0,
    // Category group enabled for the monitoring mode.
    ENABLED_FOR_MONITORING = 1 << 1,
    // Category group enabled by SetEventCallbackEnabled().
    ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
  };

  static TraceLog* GetInstance();

  // Get set of known category groups. This can change as new code paths are
  // reached. The known category groups are inserted into |category_groups|.
  void GetKnownCategoryGroups(std::vector<std::string>* category_groups);

  // Retrieves a copy (for thread-safety) of the current CategoryFilter.
  CategoryFilter GetCurrentCategoryFilter();

  // Retrieves a copy (for thread-safety) of the current TraceOptions.
  TraceOptions GetCurrentTraceOptions() const;

  // Enables normal tracing (recording trace events in the trace buffer).
  // See CategoryFilter comments for details on how to control what categories
  // will be traced. If tracing has already been enabled, |category_filter|
  // will be merged into the current category filter.
  void SetEnabled(const CategoryFilter& category_filter,
                  Mode mode, const TraceOptions& options);

  // Disables normal tracing for all categories.
  void SetDisabled();
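
  // A minimal usage sketch (illustrative only):
  //
  //   TraceLog* log = TraceLog::GetInstance();
  //   log->SetEnabled(CategoryFilter("webkit"),
  //                   TraceLog::RECORDING_MODE,
  //                   TraceOptions(RECORD_CONTINUOUSLY));
  //   // ... run the code being traced ...
  //   log->SetDisabled();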

  bool IsEnabled() { return mode_ != DISABLED; }

  // The number of times we have begun recording traces. If tracing is off,
  // returns -1. If tracing is on, then it returns the number of times we have
  // recorded a trace. By watching for this number to increment, you can
  // passively discover when a new trace has begun. This is then used to
  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
  int GetNumTracesRecorded();

#if defined(OS_ANDROID)
  void StartATrace();
  void StopATrace();
  void AddClockSyncMetadataEvent();
#endif

  // Enabled state listeners give a callback when tracing is enabled or
  // disabled. This can be used to tie into other libraries' tracing systems
  // on-demand.
  class BASE_EXPORT EnabledStateObserver {
   public:
    // Called just after the tracing system becomes enabled, outside of the
    // |lock_|. TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Called just after the tracing system disables, outside of the |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };
  void AddEnabledStateObserver(EnabledStateObserver* listener);
  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
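
  // A minimal observer sketch (illustrative only; |MyTracingBridge| is
  // hypothetical):
  //
  //   class MyTracingBridge : public TraceLog::EnabledStateObserver {
  //    public:
  //     virtual void OnTraceLogEnabled() OVERRIDE { /* start own tracing */ }
  //     virtual void OnTraceLogDisabled() OVERRIDE { /* stop own tracing */ }
  //   };
  //
  //   MyTracingBridge* bridge = new MyTracingBridge;
  //   TraceLog::GetInstance()->AddEnabledStateObserver(bridge);
  //   // ... later ...
  //   TraceLog::GetInstance()->RemoveEnabledStateObserver(bridge);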

  float GetBufferPercentFull() const;
  bool BufferIsFull() const;

  // Not using base::Callback because it is limited to 7 parameters.
  // Also, using a primitive type allows directly passing the callback from
  // WebCore.
  // WARNING: It is possible for the previously set callback to be called
  // after a call to SetEventCallbackEnabled() that replaces it, or a call to
  // SetEventCallbackDisabled() that disables the callback.
  // This callback may be invoked on any thread.
  // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
  // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
  // interface simple.
  typedef void (*EventCallback)(TimeTicks timestamp,
                                char phase,
                                const unsigned char* category_group_enabled,
                                const char* name,
                                unsigned long long id,
                                int num_args,
                                const char* const arg_names[],
                                const unsigned char arg_types[],
                                const unsigned long long arg_values[],
                                unsigned char flags);

  // Enable tracing for EventCallback.
  void SetEventCallbackEnabled(const CategoryFilter& category_filter,
                               EventCallback cb);
  void SetEventCallbackDisabled();
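
  // A minimal callback sketch (illustrative only; |OnTraceEvent| is
  // hypothetical and must be thread-safe since it may run on any thread):
  //
  //   void OnTraceEvent(TimeTicks timestamp,
  //                     char phase,
  //                     const unsigned char* category_group_enabled,
  //                     const char* name,
  //                     unsigned long long id,
  //                     int num_args,
  //                     const char* const arg_names[],
  //                     const unsigned char arg_types[],
  //                     const unsigned long long arg_values[],
  //                     unsigned char flags) {
  //     // Forward the event to another tracing system.
  //   }
  //
  //   TraceLog::GetInstance()->SetEventCallbackEnabled(
  //       CategoryFilter("webkit"), &OnTraceEvent);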

  // Flush all collected events to the given output callback. The callback will
  // be called one or more times either synchronously or asynchronously from
  // the current thread with IPC-bite-size chunks. The string format is
  // undefined. Use TraceResultBuffer to convert one or more trace strings to
  // JSON. The callback can be null if the caller doesn't want any data.
  // Due to the implementation of thread-local buffers, flush can't be
  // done when tracing is enabled. If called when tracing is enabled, the
  // callback will be called directly with (empty_string, false) to indicate
  // the end of this unsuccessful flush.
  typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
                              bool has_more_events)> OutputCallback;
  void Flush(const OutputCallback& cb);
  void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
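
  // A minimal flush sketch (illustrative only; |OnTraceData| is hypothetical),
  // feeding each chunk into a TraceResultBuffer:
  //
  //   void OnTraceData(TraceResultBuffer* buffer,
  //                    const scoped_refptr<base::RefCountedString>& chunk,
  //                    bool has_more_events) {
  //     buffer->AddFragment(chunk->data());
  //     if (!has_more_events)
  //       buffer->Finish();
  //   }
  //
  //   TraceResultBuffer result_buffer;  // wired to a SimpleOutput elsewhere
  //   TraceLog::GetInstance()->SetDisabled();
  //   TraceLog::GetInstance()->Flush(
  //       base::Bind(&OnTraceData, base::Unretained(&result_buffer)));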

  // Called by TRACE_EVENT* macros, don't call this directly.
  // The name parameter is a category group for example:
  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
  static const unsigned char* GetCategoryGroupEnabled(const char* name);
  static const char* GetCategoryGroupName(
      const unsigned char* category_group_enabled);

  // Called by TRACE_EVENT* macros, don't call this directly.
  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
  TraceEventHandle AddTraceEvent(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int thread_id,
      const TimeTicks& timestamp,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const char* extra);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const std::string& extra);

  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceEventHandle handle);

  // For every matching event, the callback will be called.
  typedef base::Callback<void()> WatchEventCallback;
  void SetWatchEvent(const std::string& category_name,
                     const std::string& event_name,
                     const WatchEventCallback& callback);
  // Cancel the watch event. If tracing is enabled, this may race with the
  // watch event notification firing.
  void CancelWatchEvent();
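
  // A minimal watch-event sketch (illustrative only; |OnEventSeen| is
  // hypothetical):
  //
  //   void OnEventSeen() { LOG(INFO) << "Saw the watched event."; }
  //
  //   TraceLog::GetInstance()->SetWatchEvent(
  //       "renderer", "WebViewImpl::HandleInputEvent",
  //       base::Bind(&OnEventSeen));
  //   // ... later ...
  //   TraceLog::GetInstance()->CancelWatchEvent();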

  int process_id() const { return process_id_; }

  // Exposed for unittesting:

  void WaitSamplingEventForTesting();

  // Allows deleting our singleton instance.
  static void DeleteForTesting();

  // Allow tests to inspect TraceEvents.
  size_t GetEventsSize() const { return logged_events_->Size(); }
  TraceEvent* GetEventByHandle(TraceEventHandle handle);

  void SetProcessID(int process_id);

  // Process sort indices, if set, override the order in which a process will
  // appear relative to other processes in the trace viewer. Processes are
  // sorted first on their sort index, ascending, then by their name, and then
  // tid.
  void SetProcessSortIndex(int sort_index);

  // Sets the name of the process.
  void SetProcessName(const std::string& process_name);

  // Processes can have labels in addition to their names. Use labels, for
  // instance, to list out the web page titles that a process is handling.
  void UpdateProcessLabel(int label_id, const std::string& current_label);
  void RemoveProcessLabel(int label_id);

  // Thread sort indices, if set, override the order in which a thread will
  // appear within its process in the trace viewer. Threads are sorted first on
  // their sort index, ascending, then by their name, and then tid.
  void SetThreadSortIndex(PlatformThreadId, int sort_index);

  // Allow setting an offset between the current TimeTicks time and the time
  // that should be reported.
  void SetTimeOffset(TimeDelta offset);

  size_t GetObserverCountForTest() const;

  // Call this method if the current thread may block the message loop to
  // prevent the thread from using the thread-local buffer because the thread
  // may not handle the flush request in time, causing loss of unflushed
  // events.
  void SetCurrentThreadBlocksMessageLoop();

 private:
  typedef unsigned int InternalTraceOptions;

  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferGetReturnChunk);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferHalfIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferFullIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferVectorReportFull);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           ConvertTraceOptionsToInternalOptions);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceRecordAsMuchAsPossibleMode);

  // This allows constructor and destructor to be private and usable only
  // by the Singleton class.
  friend struct DefaultSingletonTraits<TraceLog>;

  // Enable/disable each category group based on the current mode_,
  // category_filter_, event_callback_ and event_callback_category_filter_.
  // Enable the category group in the enabled mode if category_filter_ matches
  // the category group, or event_callback_ is not null and
  // event_callback_category_filter_ matches the category group.
  void UpdateCategoryGroupEnabledFlags();
  void UpdateCategoryGroupEnabledFlag(size_t category_index);

  // Configure synthetic delays based on the values set in the current
  // category filter.
  void UpdateSyntheticDelaysFromCategoryFilter();

  InternalTraceOptions GetInternalOptionsFromTraceOptions(
      const TraceOptions& options);

  class ThreadLocalEventBuffer;
  class OptionalAutoLock;

  TraceLog();
  ~TraceLog();
  const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
  void AddMetadataEventsWhileLocked();

  InternalTraceOptions trace_options() const {
    return static_cast<InternalTraceOptions>(
        subtle::NoBarrier_Load(&trace_options_));
  }

  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
  TraceBuffer* CreateTraceBuffer();
  TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);

  std::string EventToConsoleMessage(unsigned char phase,
                                    const TimeTicks& timestamp,
                                    TraceEvent* trace_event);

  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
                                                     bool check_buffer_is_full);
  void CheckIfBufferIsFullWhileLocked();
  void SetDisabledWhileLocked();

  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
                                       OptionalAutoLock* lock);

  // |generation| is used in the following callbacks to check if the callback
  // is called for the flush of the current |logged_events_|.
  void FlushCurrentThread(int generation);
  void ConvertTraceEventsToTraceFormat(
      scoped_ptr<TraceBuffer> logged_events,
      const TraceLog::OutputCallback& flush_output_callback);
  void FinishFlush(int generation);
  void OnFlushTimeout(int generation);

  int generation() const {
    return static_cast<int>(subtle::NoBarrier_Load(&generation_));
  }
  bool CheckGeneration(int generation) const {
    return generation == this->generation();
  }
  void UseNextTraceBuffer();

  TimeTicks OffsetNow() const {
    return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime());
  }
  TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
    return timestamp - time_offset_;
  }

  // Internal representation of trace options since we store the currently used
  // trace option as an AtomicWord.
  static const InternalTraceOptions kInternalNone;
  static const InternalTraceOptions kInternalRecordUntilFull;
  static const InternalTraceOptions kInternalRecordContinuously;
  static const InternalTraceOptions kInternalEchoToConsole;
  static const InternalTraceOptions kInternalEnableSampling;
  static const InternalTraceOptions kInternalRecordAsMuchAsPossible;

  // This lock protects TraceLog member accesses (except for members protected
  // by thread_info_lock_) from arbitrary threads.
  mutable Lock lock_;
  // This lock protects accesses to thread_names_, thread_event_start_times_
  // and thread_colors_.
  Lock thread_info_lock_;
  int locked_line_;
  Mode mode_;
  int num_traces_recorded_;
  scoped_ptr<TraceBuffer> logged_events_;
  subtle::AtomicWord /* EventCallback */ event_callback_;
  bool dispatching_to_observer_list_;
  std::vector<EnabledStateObserver*> enabled_state_observer_list_;

  std::string process_name_;
  base::hash_map<int, std::string> process_labels_;
  int process_sort_index_;
  base::hash_map<int, int> thread_sort_indices_;
  base::hash_map<int, std::string> thread_names_;

  // The following two maps are used only when ECHO_TO_CONSOLE.
  base::hash_map<int, std::stack<TimeTicks> > thread_event_start_times_;
  base::hash_map<std::string, int> thread_colors_;

  TimeTicks buffer_limit_reached_timestamp_;

  // XORed with TraceID to make it unlikely to collide with other processes.
  unsigned long long process_id_hash_;

  int process_id_;

  TimeDelta time_offset_;

  // Allow tests to wake up when certain events occur.
  WatchEventCallback watch_event_callback_;
  subtle::AtomicWord /* const unsigned char* */ watch_category_;
  std::string watch_event_name_;

  subtle::AtomicWord /* Options */ trace_options_;

  // Sampling thread handles.
  scoped_ptr<TraceSamplingThread> sampling_thread_;
  PlatformThreadHandle sampling_thread_handle_;

  CategoryFilter category_filter_;
  CategoryFilter event_callback_category_filter_;

  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
  ThreadLocalBoolean thread_blocks_message_loop_;
  ThreadLocalBoolean thread_is_in_trace_event_;

  // Contains the message loops of threads that have had at least one event
  // added into the local event buffer. Not using MessageLoopProxy because we
  // need to know the lifetime of the message loops.
  hash_set<MessageLoop*> thread_message_loops_;

  // For events which can't be added into the thread-local buffer, e.g. events
  // from threads without a message loop.
  scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
  size_t thread_shared_chunk_index_;

  // Set when an asynchronous Flush is in progress.
  OutputCallback flush_output_callback_;
  scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_;
  subtle::AtomicWord generation_;

  DISALLOW_COPY_AND_ASSIGN(TraceLog);
};

}  // namespace debug
}  // namespace base

#endif  // BASE_DEBUG_TRACE_EVENT_IMPL_H_