// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_
#define BASE_DEBUG_TRACE_EVENT_IMPL_H_

#include <stack>
#include <string>
#include <vector>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/memory/scoped_vector.h"
#include "base/observer_list.h"
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/threading/thread_local.h"

// Older style trace macros with explicit id and extra data.
// Only these macros result in publishing data to ETW as currently implemented.
#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_BEGIN, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_END_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_END, \
        name, reinterpret_cast<const void*>(id), extra)

#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_INSTANT, \
        name, reinterpret_cast<const void*>(id), extra)
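
// Example usage of the ETW macros (illustrative sketch only; passing |this|
// as the id and a literal string as |extra| is an assumption, not a
// requirement of the macros):
//
//   TRACE_EVENT_BEGIN_ETW("MySubsystem::DoWork", this, "started");
//   DoWork();
//   TRACE_EVENT_END_ETW("MySubsystem::DoWork", this, "finished");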

template <typename Type>
struct DefaultSingletonTraits;

namespace base {

class WaitableEvent;
class MessageLoop;

namespace debug {

// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
// class must implement this interface.
class BASE_EXPORT ConvertableToTraceFormat
    : public RefCounted<ConvertableToTraceFormat> {
 public:
  // Append the class info to the provided |out| string. The appended
  // data must be a valid JSON object. Strings must be properly quoted and
  // escaped. There is no processing applied to the content after it is
  // appended.
  virtual void AppendAsTraceFormat(std::string* out) const = 0;

  std::string ToString() const {
    std::string result;
    AppendAsTraceFormat(&result);
    return result;
  }

 protected:
  virtual ~ConvertableToTraceFormat() {}

 private:
  friend class RefCounted<ConvertableToTraceFormat>;
};
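
// Example implementation (illustrative sketch only; the class name and the
// JSON payload below are hypothetical):
//
//   class MyTraceData : public base::debug::ConvertableToTraceFormat {
//    public:
//     virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE {
//       out->append("{\"bytes_allocated\":42}");
//     }
//
//    private:
//     virtual ~MyTraceData() {}
//   };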

struct TraceEventHandle {
  uint32 chunk_seq;
  uint16 chunk_index;
  uint16 event_index;
};

const int kTraceMaxNumArgs = 2;

class BASE_EXPORT TraceEvent {
 public:
  union TraceValue {
    bool as_bool;
    unsigned long long as_uint;
    long long as_int;
    double as_double;
    const void* as_pointer;
    const char* as_string;
  };

  TraceEvent();
  ~TraceEvent();

  // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
  // Use the explicit copy method to avoid accidental misuse of copying.
  void CopyFrom(const TraceEvent& other);

  void Initialize(
      int thread_id,
      TimeTicks timestamp,
      TimeTicks thread_timestamp,
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);

  void Reset();

  void UpdateDuration(const TimeTicks& now, const TimeTicks& thread_now);

  // Serialize event data to JSON.
  void AppendAsJSON(std::string* out) const;
  void AppendPrettyPrinted(std::ostringstream* out) const;

  static void AppendValueAsJSON(unsigned char type,
                                TraceValue value,
                                std::string* out);

  TimeTicks timestamp() const { return timestamp_; }
  TimeTicks thread_timestamp() const { return thread_timestamp_; }
  char phase() const { return phase_; }
  int thread_id() const { return thread_id_; }
  TimeDelta duration() const { return duration_; }
  TimeDelta thread_duration() const { return thread_duration_; }
  unsigned long long id() const { return id_; }
  unsigned char flags() const { return flags_; }

  // Exposed for unittesting:

  const base::RefCountedString* parameter_copy_storage() const {
    return parameter_copy_storage_.get();
  }

  const unsigned char* category_group_enabled() const {
    return category_group_enabled_;
  }

  const char* name() const { return name_; }

#if defined(OS_ANDROID)
  void SendToATrace();
#endif

 private:
  // Note: these are ordered by size (largest first) for optimal packing.
  TimeTicks timestamp_;
  TimeTicks thread_timestamp_;
  TimeDelta duration_;
  TimeDelta thread_duration_;
  // id_ can be used to store phase-specific data.
  unsigned long long id_;
  TraceValue arg_values_[kTraceMaxNumArgs];
  const char* arg_names_[kTraceMaxNumArgs];
  scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
  const unsigned char* category_group_enabled_;
  const char* name_;
  scoped_refptr<base::RefCountedString> parameter_copy_storage_;
  int thread_id_;
  char phase_;
  unsigned char flags_;
  unsigned char arg_types_[kTraceMaxNumArgs];

  DISALLOW_COPY_AND_ASSIGN(TraceEvent);
};

// TraceBufferChunk is the basic unit of TraceBuffer.
class BASE_EXPORT TraceBufferChunk {
 public:
  explicit TraceBufferChunk(uint32 seq)
      : next_free_(0),
        seq_(seq) {
  }

  void Reset(uint32 new_seq);
  TraceEvent* AddTraceEvent(size_t* event_index);
  bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }

  uint32 seq() const { return seq_; }
  size_t capacity() const { return kTraceBufferChunkSize; }
  size_t size() const { return next_free_; }

  TraceEvent* GetEventAt(size_t index) {
    DCHECK(index < size());
    return &chunk_[index];
  }
  const TraceEvent* GetEventAt(size_t index) const {
    DCHECK(index < size());
    return &chunk_[index];
  }

  scoped_ptr<TraceBufferChunk> Clone() const;

  static const size_t kTraceBufferChunkSize = 64;

 private:
  size_t next_free_;
  TraceEvent chunk_[kTraceBufferChunkSize];
  uint32 seq_;
};

// TraceBuffer holds the events as they are collected.
class BASE_EXPORT TraceBuffer {
 public:
  virtual ~TraceBuffer() {}

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) = 0;

  virtual bool IsFull() const = 0;
  virtual size_t Size() const = 0;
  virtual size_t Capacity() const = 0;
  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;

  // For iteration. Each TraceBuffer can only be iterated once.
  virtual const TraceBufferChunk* NextChunk() = 0;

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
};

// TraceResultBuffer collects and converts trace fragments returned by TraceLog
// to JSON output.
class BASE_EXPORT TraceResultBuffer {
 public:
  typedef base::Callback<void(const std::string&)> OutputCallback;

  // If you don't need to stream JSON chunks out efficiently, and just want to
  // get a complete JSON string after calling Finish, use this struct to
  // collect JSON trace output.
  struct BASE_EXPORT SimpleOutput {
    OutputCallback GetCallback();
    void Append(const std::string& json_string);

    // Do what you want with the json_output string after calling
    // TraceResultBuffer::Finish.
    std::string json_output;
  };

  TraceResultBuffer();
  ~TraceResultBuffer();

  // Sets the callback. The callback will be called during Start with the
  // initial JSON output and during AddFragment and Finish with the following
  // JSON output chunks. The callback target must live past the last calls to
  // TraceResultBuffer::Start/AddFragment/Finish.
  void SetOutputCallback(const OutputCallback& json_chunk_callback);

  // Starts JSON output. This resets all internal state, so you can reuse
  // the TraceResultBuffer by calling Start.
  void Start();

  // Call AddFragment 0 or more times to add trace fragments from TraceLog.
  void AddFragment(const std::string& trace_fragment);

  // When all fragments have been added, call Finish to complete the JSON
  // formatted output.
  void Finish();

 private:
  OutputCallback output_callback_;
  bool append_comma_;
};
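
// Example usage (illustrative sketch only; |fragments| stands in for the
// trace chunks handed back by TraceLog::Flush):
//
//   base::debug::TraceResultBuffer buffer;
//   base::debug::TraceResultBuffer::SimpleOutput output;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();
//   for (size_t i = 0; i < fragments.size(); ++i)
//     buffer.AddFragment(fragments[i]);
//   buffer.Finish();
//   // output.json_output now holds the complete JSON trace.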

class BASE_EXPORT CategoryFilter {
 public:
  typedef std::vector<std::string> StringList;

  // The default category filter, used when none is provided.
  // Allows all categories through, except if they end in the suffix 'Debug' or
  // 'Test'.
  static const char kDefaultCategoryFilterString[];

  // |filter_string| is a comma-delimited list of category wildcards.
  // A category can have an optional '-' prefix to make it an excluded
  // category. All the same rules apply above, so for example, having both
  // included and excluded categories in the same list would not be supported.
  //
  // Example: CategoryFilter("test_MyTest*");
  // Example: CategoryFilter("test_MyTest*,test_OtherStuff");
  // Example: CategoryFilter("-excluded_category1,-excluded_category2");
  // Example: CategoryFilter("-*,webkit"); would disable everything but webkit.
  // Example: CategoryFilter("-webkit"); would enable everything but webkit.
  //
  // Category filters can also be used to configure synthetic delays.
  //
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16)"); would make swap
  //          buffers always take at least 16 ms.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;oneshot)"); would
  //          make swap buffers take at least 16 ms the first time it is
  //          called.
  // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;alternating)");
  //          would make swap buffers take at least 16 ms every other time it
  //          is called.
  explicit CategoryFilter(const std::string& filter_string);

  CategoryFilter();

  CategoryFilter(const CategoryFilter& cf);

  ~CategoryFilter();

  CategoryFilter& operator=(const CategoryFilter& rhs);

  // Writes the string representation of the CategoryFilter. This is a comma
  // separated string, similar in nature to the one used to determine
  // enabled/disabled category patterns, except here there is an arbitrary
  // order: included categories go first, then excluded categories. Excluded
  // categories are distinguished from included categories by the prefix '-'.
  std::string ToString() const;

  // Determines whether a category group would be enabled or
  // disabled by this category filter.
  bool IsCategoryGroupEnabled(const char* category_group) const;

  // Returns a list of the synthetic delays specified in this category filter.
  const StringList& GetSyntheticDelayValues() const;

  // Merges nested_filter with the current CategoryFilter.
  void Merge(const CategoryFilter& nested_filter);

  // Clears both included/excluded pattern lists. This would be equivalent to
  // creating a CategoryFilter with an empty string, through the constructor,
  // i.e.: CategoryFilter().
  //
  // When using an empty filter, all categories are considered included as we
  // are not excluding anything.
  void Clear();

 private:
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, CategoryFilter);

  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
      const std::string& str);

  void Initialize(const std::string& filter_string);
  void WriteString(const StringList& values,
                   std::string* out,
                   bool included) const;
  void WriteString(const StringList& delays, std::string* out) const;
  bool HasIncludedPatterns() const;

  bool DoesCategoryGroupContainCategory(const char* category_group,
                                        const char* category) const;

  StringList included_;
  StringList disabled_;
  StringList excluded_;
  StringList delays_;
};
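
// Example usage (illustrative sketch only; "-*,webkit" follows the example in
// the comment above, which disables everything but webkit):
//
//   base::debug::CategoryFilter filter("-*,webkit");
//   bool webkit_on = filter.IsCategoryGroupEnabled("webkit");  // true
//   bool gpu_on = filter.IsCategoryGroupEnabled("gpu");        // false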

class TraceSamplingThread;

// Options determine how the trace buffer stores data.
enum TraceRecordMode {
  // Record until the trace buffer is full.
  RECORD_UNTIL_FULL,

  // Record until the user ends the trace. The trace buffer is a fixed size
  // and we use it as a ring buffer during recording.
  RECORD_CONTINUOUSLY,

  // Echo to console. Events are discarded.
  ECHO_TO_CONSOLE,

  // Record until the trace buffer is full, but with a huge buffer size.
  RECORD_AS_MUCH_AS_POSSIBLE
};

struct BASE_EXPORT TraceOptions {
  TraceOptions()
      : record_mode(RECORD_UNTIL_FULL),
        enable_sampling(false),
        enable_systrace(false) {}

  explicit TraceOptions(TraceRecordMode record_mode)
      : record_mode(record_mode),
        enable_sampling(false),
        enable_systrace(false) {}

  // |options_string| is a comma-delimited list of trace options.
  // Possible options are: "record-until-full", "record-continuously",
  // "trace-to-console", "enable-sampling" and "enable-systrace".
  // The first 3 options are trace recording modes and hence mutually
  // exclusive. If more than one recording mode appears in the options_string,
  // the last one takes precedence. If no recording mode is specified, the
  // recording mode is RECORD_UNTIL_FULL.
  //
  // The trace options will first be reset to the defaults (record_mode set to
  // RECORD_UNTIL_FULL, enable_sampling and enable_systrace set to false)
  // before options parsed from |options_string| are applied.
  // If |options_string| is invalid, the final state of trace_options is
  // undefined.
  //
  // Example: trace_options.SetFromString("record-until-full")
  // Example: trace_options.SetFromString(
  //              "record-continuously, enable-sampling")
  // Example: trace_options.SetFromString("record-until-full, trace-to-console")
  //          will set ECHO_TO_CONSOLE as the recording mode.
  //
  // Returns true on success.
  bool SetFromString(const std::string& options_string);

  std::string ToString() const;

  TraceRecordMode record_mode;
  bool enable_sampling;
  bool enable_systrace;
};
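
// Example usage (illustrative sketch only, based on the SetFromString
// behavior documented above):
//
//   base::debug::TraceOptions options;
//   if (options.SetFromString("record-continuously,enable-sampling")) {
//     // options.record_mode == RECORD_CONTINUOUSLY
//     // options.enable_sampling == true, options.enable_systrace == false
//   }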

class BASE_EXPORT TraceLog {
 public:
  enum Mode {
    DISABLED = 0,
    RECORDING_MODE,
    MONITORING_MODE,
  };

  // The pointer returned from GetCategoryGroupEnabledInternal() points to a
  // value with zero or more of the following bits. Used in this class only.
  // The TRACE_EVENT macros should only use the value as a bool.
  // These values must be in sync with macro values in TraceEvent.h in Blink.
  enum CategoryGroupEnabledFlags {
    // Category group enabled for the recording mode.
    ENABLED_FOR_RECORDING = 1 << 0,
    // Category group enabled for the monitoring mode.
    ENABLED_FOR_MONITORING = 1 << 1,
    // Category group enabled by SetEventCallbackEnabled().
    ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
  };

  static TraceLog* GetInstance();

  // Gets the set of known category groups. This can change as new code paths
  // are reached. The known category groups are inserted into |category_groups|.
  void GetKnownCategoryGroups(std::vector<std::string>* category_groups);

  // Retrieves a copy (for thread-safety) of the current CategoryFilter.
  CategoryFilter GetCurrentCategoryFilter();

  // Retrieves a copy (for thread-safety) of the current TraceOptions.
  TraceOptions GetCurrentTraceOptions() const;

  // Enables normal tracing (recording trace events in the trace buffer).
  // See CategoryFilter comments for details on how to control which
  // categories will be traced. If tracing has already been enabled,
  // |category_filter| will be merged into the current category filter.
  void SetEnabled(const CategoryFilter& category_filter,
                  Mode mode, const TraceOptions& options);

  // Disables normal tracing for all categories.
  void SetDisabled();
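
  // Example usage (illustrative sketch only; the "webkit" filter string is
  // hypothetical):
  //
  //   base::debug::TraceLog* log = base::debug::TraceLog::GetInstance();
  //   log->SetEnabled(
  //       base::debug::CategoryFilter("webkit"),
  //       base::debug::TraceLog::RECORDING_MODE,
  //       base::debug::TraceOptions(base::debug::RECORD_UNTIL_FULL));
  //   ...
  //   log->SetDisabled();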

  bool IsEnabled() { return mode_ != DISABLED; }

  // The number of times we have begun recording traces. If tracing is off,
  // returns -1. If tracing is on, then it returns the number of times we have
  // recorded a trace. By watching for this number to increment, you can
  // passively discover when a new trace has begun. This is then used to
  // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
  int GetNumTracesRecorded();

#if defined(OS_ANDROID)
  void StartATrace();
  void StopATrace();
  void AddClockSyncMetadataEvent();
#endif

  // Enabled state listeners give a callback when tracing is enabled or
  // disabled. This can be used to tie into other libraries' tracing systems
  // on-demand.
  class BASE_EXPORT EnabledStateObserver {
   public:
    // Called just after the tracing system becomes enabled, outside of the
    // |lock_|. TraceLog::IsEnabled() is true at this point.
    virtual void OnTraceLogEnabled() = 0;

    // Called just after the tracing system disables, outside of the |lock_|.
    // TraceLog::IsEnabled() is false at this point.
    virtual void OnTraceLogDisabled() = 0;
  };
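
  // Example observer (illustrative sketch only; the class name and the
  // StartMyTracing/StopMyTracing calls are hypothetical):
  //
  //   class MyTracingBridge
  //       : public base::debug::TraceLog::EnabledStateObserver {
  //    public:
  //     virtual void OnTraceLogEnabled() OVERRIDE { StartMyTracing(); }
  //     virtual void OnTraceLogDisabled() OVERRIDE { StopMyTracing(); }
  //   };
  //
  //   Register it with:
  //     TraceLog::GetInstance()->AddEnabledStateObserver(&bridge);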

  void AddEnabledStateObserver(EnabledStateObserver* listener);
  void RemoveEnabledStateObserver(EnabledStateObserver* listener);
  bool HasEnabledStateObserver(EnabledStateObserver* listener) const;

  float GetBufferPercentFull() const;
  bool BufferIsFull() const;

  // Not using base::Callback because it is limited to 7 parameters.
  // Also, using a primitive type allows the callback to be passed directly
  // from WebCore.
  // WARNING: It is possible for the previously set callback to be called
  // after a call to SetEventCallbackEnabled() that replaces it, or after a
  // call to SetEventCallbackDisabled() that disables the callback.
  // This callback may be invoked on any thread.
  // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
  // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
  // interface simple.
  typedef void (*EventCallback)(TimeTicks timestamp,
                                char phase,
                                const unsigned char* category_group_enabled,
                                const char* name,
                                unsigned long long id,
                                int num_args,
                                const char* const arg_names[],
                                const unsigned char arg_types[],
                                const unsigned long long arg_values[],
                                unsigned char flags);

  // Enable tracing for EventCallback.
  void SetEventCallbackEnabled(const CategoryFilter& category_filter,
                               EventCallback cb);
  void SetEventCallbackDisabled();
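
  // Example callback (illustrative sketch only; MyEventSink and the "webkit"
  // filter are hypothetical):
  //
  //   void MyEventSink(base::TimeTicks timestamp,
  //                    char phase,
  //                    const unsigned char* category_group_enabled,
  //                    const char* name,
  //                    unsigned long long id,
  //                    int num_args,
  //                    const char* const arg_names[],
  //                    const unsigned char arg_types[],
  //                    const unsigned long long arg_values[],
  //                    unsigned char flags) {
  //     // Forward the event to another tracing system.
  //   }
  //
  //   TraceLog::GetInstance()->SetEventCallbackEnabled(
  //       base::debug::CategoryFilter("webkit"), &MyEventSink);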

  // Flush all collected events to the given output callback. The callback will
  // be called one or more times either synchronously or asynchronously from
  // the current thread with IPC-bite-size chunks. The string format is
  // undefined. Use TraceResultBuffer to convert one or more trace strings to
  // JSON. The callback can be null if the caller doesn't want any data.
  // Due to the implementation of thread-local buffers, flush can't be
  // done when tracing is enabled. If called when tracing is enabled, the
  // callback will be called directly with (empty_string, false) to indicate
  // the end of this unsuccessful flush.
  typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
                              bool has_more_events)> OutputCallback;
  void Flush(const OutputCallback& cb);
  void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
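
  // Example usage (illustrative sketch only; OnTraceDataCollected and
  // result_buffer are hypothetical, with result_buffer pointing at a
  // TraceResultBuffer as described above):
  //
  //   void OnTraceDataCollected(
  //       const scoped_refptr<base::RefCountedString>& chunk,
  //       bool has_more_events) {
  //     result_buffer->AddFragment(chunk->data());
  //     if (!has_more_events)
  //       result_buffer->Finish();
  //   }
  //
  //   TraceLog::GetInstance()->SetDisabled();
  //   TraceLog::GetInstance()->Flush(base::Bind(&OnTraceDataCollected));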

  // Called by TRACE_EVENT* macros, don't call this directly.
  // The name parameter is a category group for example:
  // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
  static const unsigned char* GetCategoryGroupEnabled(const char* name);
  static const char* GetCategoryGroupName(
      const unsigned char* category_group_enabled);

  // Called by TRACE_EVENT* macros, don't call this directly.
  // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
  // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
  TraceEventHandle AddTraceEvent(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
      char phase,
      const unsigned char* category_group_enabled,
      const char* name,
      unsigned long long id,
      int thread_id,
      const TimeTicks& timestamp,
      int num_args,
      const char** arg_names,
      const unsigned char* arg_types,
      const unsigned long long* arg_values,
      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
      unsigned char flags);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const char* extra);
  static void AddTraceEventEtw(char phase,
                               const char* category_group,
                               const void* id,
                               const std::string& extra);

  void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceEventHandle handle);

  // For every matching event, the callback will be called.
  typedef base::Callback<void()> WatchEventCallback;
  void SetWatchEvent(const std::string& category_name,
                     const std::string& event_name,
                     const WatchEventCallback& callback);
  // Cancel the watch event. If tracing is enabled, this may race with the
  // watch event notification firing.
  void CancelWatchEvent();
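
  // Example usage (illustrative sketch only; OnWatchEventHit and the
  // category/event names are hypothetical):
  //
  //   TraceLog::GetInstance()->SetWatchEvent(
  //       "renderer", "WebViewImpl::HandleInputEvent",
  //       base::Bind(&OnWatchEventHit));
  //   ...
  //   TraceLog::GetInstance()->CancelWatchEvent();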

  int process_id() const { return process_id_; }

  // Exposed for unittesting:

  void WaitSamplingEventForTesting();

  // Allows deleting our singleton instance.
  static void DeleteForTesting();

  // Allows tests to inspect TraceEvents.
  size_t GetEventsSize() const { return logged_events_->Size(); }
  TraceEvent* GetEventByHandle(TraceEventHandle handle);

  void SetProcessID(int process_id);

  // Process sort indices, if set, override the order in which a process
  // appears relative to other processes in the trace viewer. Processes are
  // sorted first by their sort index, ascending, then by their name, and then
  // by tid.
  void SetProcessSortIndex(int sort_index);

  // Sets the name of the process.
  void SetProcessName(const std::string& process_name);

  // Processes can have labels in addition to their names. Use labels, for
  // instance, to list out the web page titles that a process is handling.
  void UpdateProcessLabel(int label_id, const std::string& current_label);
  void RemoveProcessLabel(int label_id);

  // Thread sort indices, if set, override the order in which a thread appears
  // within its process in the trace viewer. Threads are sorted first by their
  // sort index, ascending, then by their name, and then by tid.
  void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);

  // Allows setting an offset between the current TimeTicks time and the time
  // that should be reported.
  void SetTimeOffset(TimeDelta offset);

  size_t GetObserverCountForTest() const;

  // Call this method if the current thread may block the message loop. It
  // prevents the thread from using the thread-local buffer because the thread
  // may not handle the flush request in time, causing loss of unflushed
  // events.
  void SetCurrentThreadBlocksMessageLoop();

 private:
  typedef unsigned int InternalTraceOptions;

  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferGetReturnChunk);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferHalfIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferRingBufferFullIteration);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceBufferVectorReportFull);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           ConvertTraceOptionsToInternalOptions);
  FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
                           TraceRecordAsMuchAsPossibleMode);

  // This allows constructor and destructor to be private and usable only
  // by the Singleton class.
  friend struct DefaultSingletonTraits<TraceLog>;

  // Enable/disable each category group based on the current mode_,
  // category_filter_, event_callback_ and event_callback_category_filter_.
  // Enable the category group in the enabled mode if category_filter_ matches
  // the category group, or if event_callback_ is not null and
  // event_callback_category_filter_ matches the category group.
  void UpdateCategoryGroupEnabledFlags();
  void UpdateCategoryGroupEnabledFlag(size_t category_index);

  // Configure synthetic delays based on the values set in the current
  // category filter.
  void UpdateSyntheticDelaysFromCategoryFilter();

  InternalTraceOptions GetInternalOptionsFromTraceOptions(
      const TraceOptions& options);

  class ThreadLocalEventBuffer;
  class OptionalAutoLock;

  TraceLog();
  ~TraceLog();
  const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
  void AddMetadataEventsWhileLocked();

  InternalTraceOptions trace_options() const {
    return static_cast<InternalTraceOptions>(
        subtle::NoBarrier_Load(&trace_options_));
  }

  TraceBuffer* trace_buffer() const { return logged_events_.get(); }
  TraceBuffer* CreateTraceBuffer();
  TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);

  std::string EventToConsoleMessage(unsigned char phase,
                                    const TimeTicks& timestamp,
                                    TraceEvent* trace_event);

  TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
                                                     bool check_buffer_is_full);
  void CheckIfBufferIsFullWhileLocked();
  void SetDisabledWhileLocked();

  TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
                                       OptionalAutoLock* lock);

  // |generation| is used in the following callbacks to check if the callback
  // is called for the flush of the current |logged_events_|.
  void FlushCurrentThread(int generation);
  void ConvertTraceEventsToTraceFormat(
      scoped_ptr<TraceBuffer> logged_events,
      const TraceLog::OutputCallback& flush_output_callback);
  void FinishFlush(int generation);
  void OnFlushTimeout(int generation);

  int generation() const {
    return static_cast<int>(subtle::NoBarrier_Load(&generation_));
  }
  bool CheckGeneration(int generation) const {
    return generation == this->generation();
  }
  void UseNextTraceBuffer();

  TimeTicks OffsetNow() const {
    return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime());
  }
  TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
    return timestamp - time_offset_;
  }

  // Internal representation of trace options since we store the currently
  // used trace option as an AtomicWord.
  static const InternalTraceOptions kInternalNone;
  static const InternalTraceOptions kInternalRecordUntilFull;
  static const InternalTraceOptions kInternalRecordContinuously;
  static const InternalTraceOptions kInternalEchoToConsole;
  static const InternalTraceOptions kInternalEnableSampling;
  static const InternalTraceOptions kInternalRecordAsMuchAsPossible;

  // This lock protects TraceLog member accesses (except for members protected
  // by thread_info_lock_) from arbitrary threads.
  mutable Lock lock_;
  // This lock protects accesses to thread_names_, thread_event_start_times_
  // and thread_colors_.
  Lock thread_info_lock_;
  int locked_line_;
  Mode mode_;
  int num_traces_recorded_;
  scoped_ptr<TraceBuffer> logged_events_;
  subtle::AtomicWord /* EventCallback */ event_callback_;
  bool dispatching_to_observer_list_;
  std::vector<EnabledStateObserver*> enabled_state_observer_list_;

  std::string process_name_;
  base::hash_map<int, std::string> process_labels_;
  int process_sort_index_;
  base::hash_map<int, int> thread_sort_indices_;
  base::hash_map<int, std::string> thread_names_;

  // The following two maps are used only when ECHO_TO_CONSOLE.
  base::hash_map<int, std::stack<TimeTicks> > thread_event_start_times_;
  base::hash_map<std::string, int> thread_colors_;

  TimeTicks buffer_limit_reached_timestamp_;

  // XORed with TraceID to make it unlikely to collide with other processes.
  unsigned long long process_id_hash_;

  int process_id_;

  TimeDelta time_offset_;

  // Allow tests to wake up when certain events occur.
  WatchEventCallback watch_event_callback_;
  subtle::AtomicWord /* const unsigned char* */ watch_category_;
  std::string watch_event_name_;

  subtle::AtomicWord /* Options */ trace_options_;

  // Sampling thread handles.
  scoped_ptr<TraceSamplingThread> sampling_thread_;
  PlatformThreadHandle sampling_thread_handle_;

  CategoryFilter category_filter_;
  CategoryFilter event_callback_category_filter_;

  ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
  ThreadLocalBoolean thread_blocks_message_loop_;
  ThreadLocalBoolean thread_is_in_trace_event_;

  // Contains the message loops of threads that have had at least one event
  // added into the local event buffer. Not using MessageLoopProxy because we
  // need to know the lifetime of the message loops.
  hash_set<MessageLoop*> thread_message_loops_;

  // For events which can't be added into the thread local buffer, e.g. events
  // from threads without a message loop.
  scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
  size_t thread_shared_chunk_index_;

  // Set when asynchronous Flush is in progress.
  OutputCallback flush_output_callback_;
  scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_;
  subtle::AtomicWord generation_;

  DISALLOW_COPY_AND_ASSIGN(TraceLog);
};

}  // namespace debug
}  // namespace base

#endif  // BASE_DEBUG_TRACE_EVENT_IMPL_H_