base/debug/trace_event_impl.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/debug/trace_event_impl.h"
7 #include <algorithm>
9 #include "base/bind.h"
10 #include "base/debug/leak_annotations.h"
11 #include "base/debug/trace_event.h"
12 #include "base/format_macros.h"
13 #include "base/lazy_instance.h"
14 #include "base/memory/singleton.h"
15 #include "base/process_util.h"
16 #include "base/stl_util.h"
17 #include "base/string_util.h"
18 #include "base/stringprintf.h"
19 #include "base/strings/string_split.h"
20 #include "base/strings/string_tokenizer.h"
21 #include "base/synchronization/cancellation_flag.h"
22 #include "base/synchronization/waitable_event.h"
23 #include "base/sys_info.h"
24 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
25 #include "base/threading/platform_thread.h"
26 #include "base/threading/thread_id_name_manager.h"
27 #include "base/threading/thread_local.h"
28 #include "base/time.h"
29 #include "base/utf_string_conversions.h"
31 #if defined(OS_WIN)
32 #include "base/debug/trace_event_win.h"
33 #endif
35 class DeleteTraceLogForTesting {
36 public:
37 static void Delete() {
38 Singleton<base::debug::TraceLog,
39 StaticMemorySingletonTraits<base::debug::TraceLog> >::OnExit(0);
43 // Not supported in split-dll build. http://crbug.com/237249
44 #if !defined(CHROME_SPLIT_DLL)
45 // The thread buckets for the sampling profiler.
46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state0;
47 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state1;
48 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state2;
49 #endif
51 namespace base {
52 namespace debug {
54 // Controls the number of trace events we will buffer in-memory
55 // before throwing them away.
56 const size_t kTraceEventBufferSize = 500000;
57 const size_t kTraceEventBatchSize = 1000;
58 const size_t kTraceEventInitialBufferSize = 1024;
60 #define MAX_CATEGORY_GROUPS 100
62 namespace {
64 // Parallel arrays g_category_groups and g_category_group_enabled are separate
65 // so that a pointer to a member of g_category_group_enabled can be easily
66 // converted to an index into g_category_groups. This allows macros to deal
67 // only with char enabled pointers from g_category_group_enabled, and we can
68 // convert internally to determine the category name from the char enabled
69 // pointer.
70 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
71 "tracing already shutdown",
72 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
73 "__metadata",
76 // The enabled flag is char instead of bool so that the API can be used from C.
77 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
78 const int g_category_already_shutdown = 0;
79 const int g_category_categories_exhausted = 1;
80 const int g_category_metadata = 2;
81 const int g_num_builtin_categories = 3;
82 int g_category_index = g_num_builtin_categories; // Skip default categories.
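// A minimal sketch of the pointer-to-index mapping described above: a pointer
// into g_category_group_enabled recovers its category group name with plain
// pointer arithmetic, e.g.
//   const unsigned char* enabled =
//       &g_category_group_enabled[g_category_metadata];
//   size_t index = enabled - g_category_group_enabled;  // == 2
//   const char* name = g_category_groups[index];        // == "__metadata"
// TraceLog::GetCategoryGroupName() below performs exactly this computation.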
84 // The name of the current thread. This is used to decide if the current
85 // thread name has changed. We combine all the seen thread names into the
86 // output name for the thread.
87 LazyInstance<ThreadLocalPointer<const char> >::Leaky
88 g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
90 const char kRecordUntilFull[] = "record-until-full";
91 const char kRecordContinuously[] = "record-continuously";
93 size_t NextIndex(size_t index) {
94 index++;
95 if (index >= kTraceEventBufferSize)
96 index = 0;
97 return index;
100 } // namespace
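// Worked through with kTraceEventBufferSize == 500000, NextIndex() gives
//   NextIndex(0) == 1, NextIndex(499998) == 499999, NextIndex(499999) == 0,
// i.e. the index wraps. TraceBufferRingBuffer below relies on this wrap: once
// the write cursor catches oldest_event_index_, the oldest event is skipped
// over and its slot reused instead of growing the vector.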
102 class TraceBufferRingBuffer : public TraceBuffer {
103 public:
104 TraceBufferRingBuffer()
105 : unused_event_index_(0),
106 oldest_event_index_(0) {
107 logged_events_.reserve(kTraceEventInitialBufferSize);
110 virtual ~TraceBufferRingBuffer() {}
112 virtual void AddEvent(const TraceEvent& event) OVERRIDE {
113 if (unused_event_index_ < Size())
114 logged_events_[unused_event_index_] = event;
115 else
116 logged_events_.push_back(event);
118 unused_event_index_ = NextIndex(unused_event_index_);
119 if (unused_event_index_ == oldest_event_index_) {
120 oldest_event_index_ = NextIndex(oldest_event_index_);
124 virtual bool HasMoreEvents() const OVERRIDE {
125 return oldest_event_index_ != unused_event_index_;
128 virtual const TraceEvent& NextEvent() OVERRIDE {
129 DCHECK(HasMoreEvents());
131 size_t next = oldest_event_index_;
132 oldest_event_index_ = NextIndex(oldest_event_index_);
133 return GetEventAt(next);
136 virtual bool IsFull() const OVERRIDE {
137 return false;
140 virtual size_t CountEnabledByName(
141 const unsigned char* category,
142 const std::string& event_name) const OVERRIDE {
143 size_t notify_count = 0;
144 size_t index = oldest_event_index_;
145 while (index != unused_event_index_) {
146 const TraceEvent& event = GetEventAt(index);
147 if (category == event.category_group_enabled() &&
148 strcmp(event_name.c_str(), event.name()) == 0) {
149 ++notify_count;
151 index = NextIndex(index);
153 return notify_count;
156 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
157 DCHECK(index < logged_events_.size());
158 return logged_events_[index];
161 virtual size_t Size() const OVERRIDE {
162 return logged_events_.size();
165 private:
166 size_t unused_event_index_;
167 size_t oldest_event_index_;
168 std::vector<TraceEvent> logged_events_;
170 DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
173 class TraceBufferVector : public TraceBuffer {
174 public:
175 TraceBufferVector() : current_iteration_index_(0) {
176 logged_events_.reserve(kTraceEventInitialBufferSize);
179 virtual ~TraceBufferVector() {
182 virtual void AddEvent(const TraceEvent& event) OVERRIDE {
183 // Note, we have two callers which need to be handled. The first is
184 // AddTraceEventWithThreadIdAndTimestamp() which checks Size() and does an
185 // early exit if full. The second is AddThreadNameMetadataEvents().
186 // We cannot DCHECK(!IsFull()) because we have to add the metadata
187 // events even if the buffer is full.
188 logged_events_.push_back(event);
191 virtual bool HasMoreEvents() const OVERRIDE {
192 return current_iteration_index_ < Size();
195 virtual const TraceEvent& NextEvent() OVERRIDE {
196 DCHECK(HasMoreEvents());
197 return GetEventAt(current_iteration_index_++);
200 virtual bool IsFull() const OVERRIDE {
201 return Size() >= kTraceEventBufferSize;
204 virtual size_t CountEnabledByName(
205 const unsigned char* category,
206 const std::string& event_name) const OVERRIDE {
207 size_t notify_count = 0;
208 for (size_t i = 0; i < Size(); i++) {
209 const TraceEvent& event = GetEventAt(i);
210 if (category == event.category_group_enabled() &&
211 strcmp(event_name.c_str(), event.name()) == 0) {
212 ++notify_count;
215 return notify_count;
218 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
219 DCHECK(index < logged_events_.size());
220 return logged_events_[index];
223 virtual size_t Size() const OVERRIDE {
224 return logged_events_.size();
227 private:
228 size_t current_iteration_index_;
229 std::vector<TraceEvent> logged_events_;
231 DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
234 class TraceBufferDiscardsEvents : public TraceBuffer {
235 public:
236 virtual ~TraceBufferDiscardsEvents() { }
238 virtual void AddEvent(const TraceEvent& event) OVERRIDE {}
239 virtual bool HasMoreEvents() const OVERRIDE { return false; }
241 virtual const TraceEvent& NextEvent() OVERRIDE {
242 NOTREACHED();
243 return *static_cast<TraceEvent*>(NULL);
246 virtual bool IsFull() const OVERRIDE { return false; }
248 virtual size_t CountEnabledByName(
249 const unsigned char* category,
250 const std::string& event_name) const OVERRIDE {
251 return 0;
254 virtual size_t Size() const OVERRIDE { return 0; }
256 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
257 NOTREACHED();
258 return *static_cast<TraceEvent*>(NULL);
262 ////////////////////////////////////////////////////////////////////////////////
264 // TraceEvent
266 ////////////////////////////////////////////////////////////////////////////////
268 namespace {
270 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
272 // Copies |*member| into |*buffer|, sets |*member| to point to this new
273 // location, and then advances |*buffer| by the amount written.
274 void CopyTraceEventParameter(char** buffer,
275 const char** member,
276 const char* end) {
277 if (*member) {
278 size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
279 DCHECK_LE(static_cast<int>(written), end - *buffer);
280 *member = *buffer;
281 *buffer += written;
285 } // namespace
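// A small worked example of the copy path in the TraceEvent constructor
// below: with TRACE_EVENT_FLAG_COPY set, an event named "Draw" carrying one
// string argument "layer" = "root" needs
//   GetAllocLength("Draw") + GetAllocLength("layer") + GetAllocLength("root")
//       == 5 + 6 + 5 == 16 bytes
// of parameter_copy_storage_. CopyTraceEventParameter() then copies each
// string into that block and repoints name_, arg_names_[0] and
// arg_values_[0].as_string at the copies, so the event no longer depends on
// the caller's string lifetimes.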
287 TraceEvent::TraceEvent()
288 : id_(0u),
289 category_group_enabled_(NULL),
290 name_(NULL),
291 thread_id_(0),
292 phase_(TRACE_EVENT_PHASE_BEGIN),
293 flags_(0) {
294 arg_names_[0] = NULL;
295 arg_names_[1] = NULL;
296 memset(arg_values_, 0, sizeof(arg_values_));
299 TraceEvent::TraceEvent(
300 int thread_id,
301 TimeTicks timestamp,
302 char phase,
303 const unsigned char* category_group_enabled,
304 const char* name,
305 unsigned long long id,
306 int num_args,
307 const char** arg_names,
308 const unsigned char* arg_types,
309 const unsigned long long* arg_values,
310 scoped_ptr<ConvertableToTraceFormat> convertable_values[],
311 unsigned char flags)
312 : timestamp_(timestamp),
313 id_(id),
314 category_group_enabled_(category_group_enabled),
315 name_(name),
316 thread_id_(thread_id),
317 phase_(phase),
318 flags_(flags) {
319 // Clamp num_args since it may have been set by a third_party library.
320 num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
321 int i = 0;
322 for (; i < num_args; ++i) {
323 arg_names_[i] = arg_names[i];
324 arg_types_[i] = arg_types[i];
326 if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
327 convertable_values_[i].reset(convertable_values[i].release());
328 else
329 arg_values_[i].as_uint = arg_values[i];
331 for (; i < kTraceMaxNumArgs; ++i) {
332 arg_names_[i] = NULL;
333 arg_values_[i].as_uint = 0u;
334 convertable_values_[i].reset();
335 arg_types_[i] = TRACE_VALUE_TYPE_UINT;
338 bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
339 size_t alloc_size = 0;
340 if (copy) {
341 alloc_size += GetAllocLength(name);
342 for (i = 0; i < num_args; ++i) {
343 alloc_size += GetAllocLength(arg_names_[i]);
344 if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
345 arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
349 bool arg_is_copy[kTraceMaxNumArgs];
350 for (i = 0; i < num_args; ++i) {
351 // No copying of convertable types, we retain ownership.
352 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
353 continue;
355 // We only take a copy of arg_vals if they are of type COPY_STRING.
356 arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
357 if (arg_is_copy[i])
358 alloc_size += GetAllocLength(arg_values_[i].as_string);
361 if (alloc_size) {
362 parameter_copy_storage_ = new RefCountedString;
363 parameter_copy_storage_->data().resize(alloc_size);
364 char* ptr = string_as_array(&parameter_copy_storage_->data());
365 const char* end = ptr + alloc_size;
366 if (copy) {
367 CopyTraceEventParameter(&ptr, &name_, end);
368 for (i = 0; i < num_args; ++i) {
369 CopyTraceEventParameter(&ptr, &arg_names_[i], end);
372 for (i = 0; i < num_args; ++i) {
373 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
374 continue;
375 if (arg_is_copy[i])
376 CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
378 DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
382 TraceEvent::TraceEvent(const TraceEvent& other)
383 : timestamp_(other.timestamp_),
384 id_(other.id_),
385 category_group_enabled_(other.category_group_enabled_),
386 name_(other.name_),
387 thread_id_(other.thread_id_),
388 phase_(other.phase_),
389 flags_(other.flags_) {
390 parameter_copy_storage_ = other.parameter_copy_storage_;
392 for (int i = 0; i < kTraceMaxNumArgs; ++i) {
393 arg_values_[i] = other.arg_values_[i];
394 arg_names_[i] = other.arg_names_[i];
395 arg_types_[i] = other.arg_types_[i];
397 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
398 convertable_values_[i].reset(
399 const_cast<TraceEvent*>(&other)->convertable_values_[i].release());
400 } else {
401 convertable_values_[i].reset();
406 TraceEvent& TraceEvent::operator=(const TraceEvent& other) {
407 if (this == &other)
408 return *this;
410 timestamp_ = other.timestamp_;
411 id_ = other.id_;
412 category_group_enabled_ = other.category_group_enabled_;
413 name_ = other.name_;
414 parameter_copy_storage_ = other.parameter_copy_storage_;
415 thread_id_ = other.thread_id_;
416 phase_ = other.phase_;
417 flags_ = other.flags_;
419 for (int i = 0; i < kTraceMaxNumArgs; ++i) {
420 arg_values_[i] = other.arg_values_[i];
421 arg_names_[i] = other.arg_names_[i];
422 arg_types_[i] = other.arg_types_[i];
424 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
425 convertable_values_[i].reset(
426 const_cast<TraceEvent*>(&other)->convertable_values_[i].release());
427 } else {
428 convertable_values_[i].reset();
431 return *this;
434 TraceEvent::~TraceEvent() {
437 // static
438 void TraceEvent::AppendValueAsJSON(unsigned char type,
439 TraceEvent::TraceValue value,
440 std::string* out) {
441 std::string::size_type start_pos;
442 switch (type) {
443 case TRACE_VALUE_TYPE_BOOL:
444 *out += value.as_bool ? "true" : "false";
445 break;
446 case TRACE_VALUE_TYPE_UINT:
447 StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
448 break;
449 case TRACE_VALUE_TYPE_INT:
450 StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
451 break;
452 case TRACE_VALUE_TYPE_DOUBLE:
453 StringAppendF(out, "%f", value.as_double);
454 break;
455 case TRACE_VALUE_TYPE_POINTER:
456 // JSON only supports double and int numbers.
457 // So as not to lose bits from a 64-bit pointer, output as a hex string.
458 StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
459 reinterpret_cast<intptr_t>(
460 value.as_pointer)));
461 break;
462 case TRACE_VALUE_TYPE_STRING:
463 case TRACE_VALUE_TYPE_COPY_STRING:
464 *out += "\"";
465 start_pos = out->size();
466 *out += value.as_string ? value.as_string : "NULL";
467 // insert backslash before special characters for proper json format.
468 while ((start_pos = out->find_first_of("\\\"", start_pos)) !=
469 std::string::npos) {
470 out->insert(start_pos, 1, '\\');
471 // skip inserted escape character and following character.
472 start_pos += 2;
474 *out += "\"";
475 break;
476 default:
477 NOTREACHED() << "Don't know how to print this value";
478 break;
482 void TraceEvent::AppendAsJSON(std::string* out) const {
483 int64 time_int64 = timestamp_.ToInternalValue();
484 int process_id = TraceLog::GetInstance()->process_id();
485 // Category group checked at category creation time.
486 DCHECK(!strchr(name_, '"'));
487 StringAppendF(out,
488 "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
489 "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
490 TraceLog::GetCategoryGroupName(category_group_enabled_),
491 process_id,
492 thread_id_,
493 time_int64,
494 phase_,
495 name_);
497 // Output argument names and values, stop at first NULL argument name.
498 for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
499 if (i > 0)
500 *out += ",";
501 *out += "\"";
502 *out += arg_names_[i];
503 *out += "\":";
505 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
506 convertable_values_[i]->AppendAsTraceFormat(out);
507 else
508 AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
510 *out += "}";
512 // If id_ is set, print it out as a hex string so we don't lose any
513 // bits (it might be a 64-bit pointer).
514 if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
515 StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));
517 // Instant events also output their scope.
518 if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
519 char scope = '?';
520 switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
521 case TRACE_EVENT_SCOPE_GLOBAL:
522 scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
523 break;
525 case TRACE_EVENT_SCOPE_PROCESS:
526 scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
527 break;
529 case TRACE_EVENT_SCOPE_THREAD:
530 scope = TRACE_EVENT_SCOPE_NAME_THREAD;
531 break;
533 StringAppendF(out, ",\"s\":\"%c\"", scope);
536 *out += "}";
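// For reference, a serialized event roughly looks like (values illustrative):
//   {"cat":"cc","pid":1234,"tid":5678,"ts":1408001,"ph":"I","name":"Draw",
//    "args":{"width":800},"s":"t"}
// where "id" appears only for events with TRACE_EVENT_FLAG_HAS_ID, "s" only
// for instant events, and pointer arguments are emitted as quoted hex strings
// so no 64-bit value is truncated by JSON's double-based numbers.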
539 ////////////////////////////////////////////////////////////////////////////////
541 // TraceResultBuffer
543 ////////////////////////////////////////////////////////////////////////////////
545 TraceResultBuffer::OutputCallback
546 TraceResultBuffer::SimpleOutput::GetCallback() {
547 return Bind(&SimpleOutput::Append, Unretained(this));
550 void TraceResultBuffer::SimpleOutput::Append(
551 const std::string& json_trace_output) {
552 json_output += json_trace_output;
555 TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
558 TraceResultBuffer::~TraceResultBuffer() {
561 void TraceResultBuffer::SetOutputCallback(
562 const OutputCallback& json_chunk_callback) {
563 output_callback_ = json_chunk_callback;
566 void TraceResultBuffer::Start() {
567 append_comma_ = false;
568 output_callback_.Run("[");
571 void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
572 if (append_comma_)
573 output_callback_.Run(",");
574 append_comma_ = true;
575 output_callback_.Run(trace_fragment);
578 void TraceResultBuffer::Finish() {
579 output_callback_.Run("]");
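// Minimal usage sketch: TraceResultBuffer stitches the JSON fragments handed
// out by TraceLog::Flush() into a single JSON array (first_chunk and
// second_chunk below are placeholder strings), e.g.
//   TraceResultBuffer::SimpleOutput output;
//   TraceResultBuffer buffer;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();                    // output.json_output == "["
//   buffer.AddFragment(first_chunk);
//   buffer.AddFragment(second_chunk);  // a ',' is emitted before this one
//   buffer.Finish();                   // closes the array with "]"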
582 ////////////////////////////////////////////////////////////////////////////////
584 // TraceSamplingThread
586 ////////////////////////////////////////////////////////////////////////////////
587 class TraceBucketData;
588 typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
590 class TraceBucketData {
591 public:
592 TraceBucketData(base::subtle::AtomicWord* bucket,
593 const char* name,
594 TraceSampleCallback callback);
595 ~TraceBucketData();
597 TRACE_EVENT_API_ATOMIC_WORD* bucket;
598 const char* bucket_name;
599 TraceSampleCallback callback;
602 // This object must be created on the IO thread.
603 class TraceSamplingThread : public PlatformThread::Delegate {
604 public:
605 TraceSamplingThread();
606 virtual ~TraceSamplingThread();
608 // Implementation of PlatformThread::Delegate:
609 virtual void ThreadMain() OVERRIDE;
611 static void DefaultSampleCallback(TraceBucketData* bucket_data);
613 void Stop();
614 void InstallWaitableEventForSamplingTesting(WaitableEvent* waitable_event);
616 private:
617 friend class TraceLog;
619 void GetSamples();
620 // Not thread-safe. Once the ThreadMain has been called, this can no longer
621 // be called.
622 void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
623 const char* const name,
624 TraceSampleCallback callback);
625 // Splits a combined "category\0name" into the two component parts.
626 static void ExtractCategoryAndName(const char* combined,
627 const char** category,
628 const char** name);
629 std::vector<TraceBucketData> sample_buckets_;
630 bool thread_running_;
631 scoped_ptr<CancellationFlag> cancellation_flag_;
632 scoped_ptr<WaitableEvent> waitable_event_for_testing_;
636 TraceSamplingThread::TraceSamplingThread()
637 : thread_running_(false) {
638 cancellation_flag_.reset(new CancellationFlag);
641 TraceSamplingThread::~TraceSamplingThread() {
644 void TraceSamplingThread::ThreadMain() {
645 PlatformThread::SetName("Sampling Thread");
646 thread_running_ = true;
647 const int kSamplingFrequencyMicroseconds = 1000;
648 while (!cancellation_flag_->IsSet()) {
649 PlatformThread::Sleep(
650 TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
651 GetSamples();
652 if (waitable_event_for_testing_.get())
653 waitable_event_for_testing_->Signal();
657 // static
658 void TraceSamplingThread::DefaultSampleCallback(TraceBucketData* bucket_data) {
659 TRACE_EVENT_API_ATOMIC_WORD category_and_name =
660 TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
661 if (!category_and_name)
662 return;
663 const char* const combined =
664 reinterpret_cast<const char* const>(category_and_name);
665 const char* category_group;
666 const char* name;
667 ExtractCategoryAndName(combined, &category_group, &name);
668 TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
669 TraceLog::GetCategoryGroupEnabled(category_group),
670 name, 0, 0, NULL, NULL, NULL, NULL, 0);
673 void TraceSamplingThread::GetSamples() {
674 for (size_t i = 0; i < sample_buckets_.size(); ++i) {
675 TraceBucketData* bucket_data = &sample_buckets_[i];
676 bucket_data->callback.Run(bucket_data);
680 void TraceSamplingThread::RegisterSampleBucket(
681 TRACE_EVENT_API_ATOMIC_WORD* bucket,
682 const char* const name,
683 TraceSampleCallback callback) {
684 DCHECK(!thread_running_);
685 sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
688 // static
689 void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
690 const char** category,
691 const char** name) {
692 *category = combined;
693 *name = &combined[strlen(combined) + 1];
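// The sampling buckets store the category and name as one "category\0name"
// string; for example the buffer { 'g','p','u','\0','D','r','a','w','\0' }
// splits into category "gpu" and name "Draw". ExtractCategoryAndName() just
// returns a pointer to the start and a pointer one past the first NUL.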
696 void TraceSamplingThread::Stop() {
697 cancellation_flag_->Set();
700 void TraceSamplingThread::InstallWaitableEventForSamplingTesting(
701 WaitableEvent* waitable_event) {
702 waitable_event_for_testing_.reset(waitable_event);
706 TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
707 const char* name,
708 TraceSampleCallback callback)
709 : bucket(bucket),
710 bucket_name(name),
711 callback(callback) {
714 TraceBucketData::~TraceBucketData() {
717 ////////////////////////////////////////////////////////////////////////////////
719 // TraceLog
721 ////////////////////////////////////////////////////////////////////////////////
723 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log)
724 : trace_log_(trace_log),
725 notification_(0) {
728 TraceLog::NotificationHelper::~NotificationHelper() {
731 void TraceLog::NotificationHelper::AddNotificationWhileLocked(
732 int notification) {
733 if (trace_log_->notification_callback_.is_null())
734 return;
735 if (notification_ == 0)
736 callback_copy_ = trace_log_->notification_callback_;
737 notification_ |= notification;
740 void TraceLog::NotificationHelper::SendNotificationIfAny() {
741 if (notification_)
742 callback_copy_.Run(notification_);
745 // static
746 TraceLog* TraceLog::GetInstance() {
747 return Singleton<TraceLog, StaticMemorySingletonTraits<TraceLog> >::get();
750 // static
751 // Note, if you add more options here you also need to update:
752 // content/browser/devtools/devtools_tracing_handler:TraceOptionsFromString
753 TraceLog::Options TraceLog::TraceOptionsFromString(const std::string& options) {
754 std::vector<std::string> split;
755 base::SplitString(options, ',', &split);
756 int ret = 0;
757 for (std::vector<std::string>::iterator iter = split.begin();
758 iter != split.end();
759 ++iter) {
760 if (*iter == kRecordUntilFull) {
761 ret |= RECORD_UNTIL_FULL;
762 } else if (*iter == kRecordContinuously) {
763 ret |= RECORD_CONTINUOUSLY;
764 } else {
765 NOTREACHED(); // Unknown option provided.
768 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
769 ret |= RECORD_UNTIL_FULL; // Default when no options are specified.
771 return static_cast<Options>(ret);
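// Examples of the accepted option strings (anything else trips the
// NOTREACHED above):
//   TraceOptionsFromString("record-until-full")   == RECORD_UNTIL_FULL
//   TraceOptionsFromString("record-continuously") == RECORD_CONTINUOUSLY
//   TraceOptionsFromString("record-until-full,record-continuously")
//       == static_cast<Options>(RECORD_UNTIL_FULL | RECORD_CONTINUOUSLY)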
774 TraceLog::TraceLog()
775 : enable_count_(0),
776 num_traces_recorded_(0),
777 logged_events_(NULL),
778 dispatching_to_observer_list_(false),
779 watch_category_(NULL),
780 trace_options_(RECORD_UNTIL_FULL),
781 sampling_thread_handle_(0),
782 category_filter_(CategoryFilter::kDefaultCategoryFilterString) {
783 // Trace is enabled or disabled on one thread while other threads are
784 // accessing the enabled flag. We don't care whether edge-case events are
785 // traced or not, so we allow races on the enabled flag to keep the trace
786 // macros fast.
787 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
788 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
789 // sizeof(g_category_group_enabled),
790 // "trace_event category enabled");
791 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
792 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
793 "trace_event category enabled");
795 #if defined(OS_NACL) // NaCl shouldn't expose the process id.
796 SetProcessID(0);
797 #else
798 SetProcessID(static_cast<int>(GetCurrentProcId()));
799 #endif
801 logged_events_.reset(GetTraceBuffer());
804 TraceLog::~TraceLog() {
807 const unsigned char* TraceLog::GetCategoryGroupEnabled(
808 const char* category_group) {
809 TraceLog* tracelog = GetInstance();
810 if (!tracelog) {
811 DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
812 return &g_category_group_enabled[g_category_already_shutdown];
814 return tracelog->GetCategoryGroupEnabledInternal(category_group);
817 const char* TraceLog::GetCategoryGroupName(
818 const unsigned char* category_group_enabled) {
819 // Calculate the index of the category group by finding
820 // category_group_enabled in g_category_group_enabled array.
821 uintptr_t category_begin =
822 reinterpret_cast<uintptr_t>(g_category_group_enabled);
823 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
824 DCHECK(category_ptr >= category_begin &&
825 category_ptr < reinterpret_cast<uintptr_t>(
826 g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
827 "out of bounds category pointer";
828 uintptr_t category_index =
829 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
830 return g_category_groups[category_index];
833 void TraceLog::EnableIncludedCategoryGroup(int category_index) {
834 bool is_enabled = category_filter_.IsCategoryGroupEnabled(
835 g_category_groups[category_index]);
836 SetCategoryGroupEnabled(category_index, is_enabled);
839 void TraceLog::SetCategoryGroupEnabled(int category_index, bool is_enabled) {
840 g_category_group_enabled[category_index] = is_enabled ? CATEGORY_ENABLED : 0;
842 #if defined(OS_ANDROID)
843 ApplyATraceEnabledFlag(&g_category_group_enabled[category_index]);
844 #endif
847 bool TraceLog::IsCategoryGroupEnabled(
848 const unsigned char* category_group_enabled) {
849 // On Android, ATrace and normal trace can be enabled independently.
850 // This function checks if the normal trace is enabled.
851 return *category_group_enabled & CATEGORY_ENABLED;
854 void TraceLog::EnableIncludedCategoryGroups() {
855 for (int i = 0; i < g_category_index; i++)
856 EnableIncludedCategoryGroup(i);
859 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
860 const char* category_group) {
861 DCHECK(!strchr(category_group, '"')) <<
862 "Category groups may not contain double quote";
863 AutoLock lock(lock_);
865 unsigned char* category_group_enabled = NULL;
866 // Search for pre-existing category group.
867 for (int i = 0; i < g_category_index; i++) {
868 if (strcmp(g_category_groups[i], category_group) == 0) {
869 category_group_enabled = &g_category_group_enabled[i];
870 break;
874 if (!category_group_enabled) {
875 // Create a new category group
876 DCHECK(g_category_index < MAX_CATEGORY_GROUPS) <<
877 "must increase MAX_CATEGORY_GROUPS";
878 if (g_category_index < MAX_CATEGORY_GROUPS) {
879 int new_index = g_category_index++;
880 // Don't hold on to the category_group pointer, so that we can create
881 // category groups with strings not known at compile time (this is
882 // required by SetWatchEvent).
883 const char* new_group = strdup(category_group);
884 ANNOTATE_LEAKING_OBJECT_PTR(new_group);
885 g_category_groups[new_index] = new_group;
886 DCHECK(!g_category_group_enabled[new_index]);
887 if (enable_count_) {
888 // Note that if both included and excluded patterns in the
889 // CategoryFilter are empty, we exclude nothing,
890 // thereby enabling this category group.
891 EnableIncludedCategoryGroup(new_index);
892 } else {
893 SetCategoryGroupEnabled(new_index, false);
895 category_group_enabled = &g_category_group_enabled[new_index];
896 } else {
897 category_group_enabled =
898 &g_category_group_enabled[g_category_categories_exhausted];
901 return category_group_enabled;
904 void TraceLog::GetKnownCategoryGroups(
905 std::vector<std::string>* category_groups) {
906 AutoLock lock(lock_);
907 for (int i = g_num_builtin_categories; i < g_category_index; i++)
908 category_groups->push_back(g_category_groups[i]);
911 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
912 Options options) {
913 AutoLock lock(lock_);
915 if (enable_count_++ > 0) {
916 if (options != trace_options_) {
917 DLOG(ERROR) << "Attempting to re-enable tracing with a different "
918 << "set of options.";
921 category_filter_.Merge(category_filter);
922 EnableIncludedCategoryGroups();
923 return;
926 if (options != trace_options_) {
927 trace_options_ = options;
928 logged_events_.reset(GetTraceBuffer());
931 if (dispatching_to_observer_list_) {
932 DLOG(ERROR) <<
933 "Cannot manipulate TraceLog::Enabled state from an observer.";
934 return;
937 num_traces_recorded_++;
939 dispatching_to_observer_list_ = true;
940 FOR_EACH_OBSERVER(EnabledStateChangedObserver, enabled_state_observer_list_,
941 OnTraceLogWillEnable());
942 dispatching_to_observer_list_ = false;
944 category_filter_ = CategoryFilter(category_filter);
945 EnableIncludedCategoryGroups();
947 // Not supported in split-dll build. http://crbug.com/237249
948 #if !defined(CHROME_SPLIT_DLL)
949 if (options & ENABLE_SAMPLING) {
950 sampling_thread_.reset(new TraceSamplingThread);
951 sampling_thread_->RegisterSampleBucket(
952 &g_trace_state0,
953 "bucket0",
954 Bind(&TraceSamplingThread::DefaultSampleCallback));
955 sampling_thread_->RegisterSampleBucket(
956 &g_trace_state1,
957 "bucket1",
958 Bind(&TraceSamplingThread::DefaultSampleCallback));
959 sampling_thread_->RegisterSampleBucket(
960 &g_trace_state2,
961 "bucket2",
962 Bind(&TraceSamplingThread::DefaultSampleCallback));
963 if (!PlatformThread::Create(
964 0, sampling_thread_.get(), &sampling_thread_handle_)) {
965 DCHECK(false) << "failed to create thread";
968 #endif
971 const CategoryFilter& TraceLog::GetCurrentCategoryFilter() {
972 AutoLock lock(lock_);
973 DCHECK(enable_count_ > 0);
974 return category_filter_;
977 void TraceLog::SetDisabled() {
978 AutoLock lock(lock_);
979 DCHECK(enable_count_ > 0);
980 if (--enable_count_ != 0)
981 return;
983 if (dispatching_to_observer_list_) {
984 DLOG(ERROR)
985 << "Cannot manipulate TraceLog::Enabled state from an observer.";
986 return;
989 if (sampling_thread_.get()) {
990 // Stop the sampling thread.
991 sampling_thread_->Stop();
992 lock_.Release();
993 PlatformThread::Join(sampling_thread_handle_);
994 lock_.Acquire();
995 sampling_thread_handle_ = PlatformThreadHandle();
996 sampling_thread_.reset();
999 dispatching_to_observer_list_ = true;
1000 FOR_EACH_OBSERVER(EnabledStateChangedObserver,
1001 enabled_state_observer_list_,
1002 OnTraceLogWillDisable());
1003 dispatching_to_observer_list_ = false;
1005 category_filter_.Clear();
1006 watch_category_ = NULL;
1007 watch_event_name_ = "";
1008 for (int i = 0; i < g_category_index; i++)
1009 SetCategoryGroupEnabled(i, false);
1010 AddThreadNameMetadataEvents();
1013 int TraceLog::GetNumTracesRecorded() {
1014 AutoLock lock(lock_);
1015 if (enable_count_ == 0)
1016 return -1;
1017 return num_traces_recorded_;
1020 void TraceLog::AddEnabledStateObserver(EnabledStateChangedObserver* listener) {
1021 enabled_state_observer_list_.AddObserver(listener);
1024 void TraceLog::RemoveEnabledStateObserver(
1025 EnabledStateChangedObserver* listener) {
1026 enabled_state_observer_list_.RemoveObserver(listener);
1029 float TraceLog::GetBufferPercentFull() const {
1030 return (float)((double)logged_events_->Size()/(double)kTraceEventBufferSize);
1033 void TraceLog::SetNotificationCallback(
1034 const TraceLog::NotificationCallback& cb) {
1035 AutoLock lock(lock_);
1036 notification_callback_ = cb;
1039 TraceBuffer* TraceLog::GetTraceBuffer() {
1040 if (trace_options_ & RECORD_CONTINUOUSLY)
1041 return new TraceBufferRingBuffer();
1042 else if (trace_options_ & ECHO_TO_VLOG)
1043 return new TraceBufferDiscardsEvents();
1044 return new TraceBufferVector();
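// In other words, the buffer implementation follows the trace options:
//   RECORD_CONTINUOUSLY -> TraceBufferRingBuffer (oldest events overwritten),
//   ECHO_TO_VLOG        -> TraceBufferDiscardsEvents (events only echoed to
//                          VLOG, never stored),
//   otherwise           -> TraceBufferVector, which reports IsFull() once it
//                          holds kTraceEventBufferSize events
//                          (record-until-full behavior).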
1047 void TraceLog::SetEventCallback(EventCallback cb) {
1048 AutoLock lock(lock_);
1049 event_callback_ = cb;
1052 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
1053 scoped_ptr<TraceBuffer> previous_logged_events;
1055 AutoLock lock(lock_);
1056 previous_logged_events.swap(logged_events_);
1057 logged_events_.reset(GetTraceBuffer());
1058 } // release lock
1060 while (previous_logged_events->HasMoreEvents()) {
1061 scoped_refptr<RefCountedString> json_events_str_ptr =
1062 new RefCountedString();
1064 for (size_t i = 0; i < kTraceEventBatchSize; ++i) {
1065 if (i > 0)
1066 *(&(json_events_str_ptr->data())) += ",";
1068 previous_logged_events->NextEvent().AppendAsJSON(
1069 &(json_events_str_ptr->data()));
1071 if (!previous_logged_events->HasMoreEvents())
1072 break;
1075 cb.Run(json_events_str_ptr);
1079 void TraceLog::AddTraceEvent(
1080 char phase,
1081 const unsigned char* category_group_enabled,
1082 const char* name,
1083 unsigned long long id,
1084 int num_args,
1085 const char** arg_names,
1086 const unsigned char* arg_types,
1087 const unsigned long long* arg_values,
1088 scoped_ptr<ConvertableToTraceFormat> convertable_values[],
1089 unsigned char flags) {
1090 int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1091 base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
1092 AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled, name, id,
1093 thread_id, now, num_args, arg_names,
1094 arg_types, arg_values,
1095 convertable_values, flags);
1098 void TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1099 char phase,
1100 const unsigned char* category_group_enabled,
1101 const char* name,
1102 unsigned long long id,
1103 int thread_id,
1104 const TimeTicks& timestamp,
1105 int num_args,
1106 const char** arg_names,
1107 const unsigned char* arg_types,
1108 const unsigned long long* arg_values,
1109 scoped_ptr<ConvertableToTraceFormat> convertable_values[],
1110 unsigned char flags) {
1111 DCHECK(name);
1113 TimeDelta duration;
1114 if (phase == TRACE_EVENT_PHASE_END && trace_options_ & ECHO_TO_VLOG) {
1115 duration = timestamp - thread_event_start_times_[thread_id].top();
1116 thread_event_start_times_[thread_id].pop();
1119 if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
1120 id ^= process_id_hash_;
1122 #if defined(OS_ANDROID)
1123 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id,
1124 num_args, arg_names, arg_types, arg_values, convertable_values,
1125 flags);
1126 #endif
1128 TimeTicks now = timestamp - time_offset_;
1129 EventCallback event_callback_copy;
1131 NotificationHelper notifier(this);
1133 do {
1134 AutoLock lock(lock_);
1135 if (!IsCategoryGroupEnabled(category_group_enabled))
1136 return;
1138 event_callback_copy = event_callback_;
1139 if (logged_events_->IsFull())
1140 break;
1142 const char* new_name = ThreadIdNameManager::GetInstance()->
1143 GetName(thread_id);
1144 // Check if the thread name has been set or changed since the previous
1145 // call (if any), but don't bother if the new name is empty. Note this will
1146 // not detect a thread name change within the same char* buffer address: we
1147 // favor common case performance over corner case correctness.
1148 if (new_name != g_current_thread_name.Get().Get() &&
1149 new_name && *new_name) {
1150 g_current_thread_name.Get().Set(new_name);
1152 hash_map<int, std::string>::iterator existing_name =
1153 thread_names_.find(thread_id);
1154 if (existing_name == thread_names_.end()) {
1155 // This is a new thread id, and a new name.
1156 thread_names_[thread_id] = new_name;
1157 } else {
1158 // This is a thread id that we've seen before, but potentially with a
1159 // new name.
1160 std::vector<StringPiece> existing_names;
1161 Tokenize(existing_name->second, ",", &existing_names);
1162 bool found = std::find(existing_names.begin(),
1163 existing_names.end(),
1164 new_name) != existing_names.end();
1165 if (!found) {
1166 existing_name->second.push_back(',');
1167 existing_name->second.append(new_name);
1172 if (trace_options_ & ECHO_TO_VLOG) {
1173 std::string thread_name = thread_names_[thread_id];
1174 if (thread_colors_.find(thread_name) == thread_colors_.end())
1175 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
1177 std::ostringstream log;
1178 log << base::StringPrintf("%s: \x1b[0;3%dm",
1179 thread_name.c_str(),
1180 thread_colors_[thread_name]);
1182 size_t depth = 0;
1183 if (thread_event_start_times_.find(thread_id) !=
1184 thread_event_start_times_.end())
1185 depth = thread_event_start_times_[thread_id].size();
1187 for (size_t i = 0; i < depth; ++i)
1188 log << "| ";
1190 log << base::StringPrintf("'%c', %s", phase, name);
1192 if (phase == TRACE_EVENT_PHASE_END)
1193 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1195 VLOG(0) << log.str() << "\x1b[0;m";
1198 logged_events_->AddEvent(TraceEvent(thread_id,
1199 now, phase, category_group_enabled, name, id,
1200 num_args, arg_names, arg_types, arg_values,
1201 convertable_values, flags));
1203 if (logged_events_->IsFull())
1204 notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL);
1206 if (watch_category_ == category_group_enabled && watch_event_name_ == name)
1207 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1208 } while (0); // release lock
1210 if (phase == TRACE_EVENT_PHASE_BEGIN && trace_options_ & ECHO_TO_VLOG)
1211 thread_event_start_times_[thread_id].push(timestamp);
1213 notifier.SendNotificationIfAny();
1214 if (event_callback_copy != NULL) {
1215 event_callback_copy(phase, category_group_enabled, name, id,
1216 num_args, arg_names, arg_types, arg_values,
1217 flags);
1221 void TraceLog::AddTraceEventEtw(char phase,
1222 const char* name,
1223 const void* id,
1224 const char* extra) {
1225 #if defined(OS_WIN)
1226 TraceEventETWProvider::Trace(name, phase, id, extra);
1227 #endif
1228 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1229 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1232 void TraceLog::AddTraceEventEtw(char phase,
1233 const char* name,
1234 const void* id,
1235 const std::string& extra)
1237 #if defined(OS_WIN)
1238 TraceEventETWProvider::Trace(name, phase, id, extra);
1239 #endif
1240 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1241 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1244 void TraceLog::SetWatchEvent(const std::string& category_name,
1245 const std::string& event_name) {
1246 const unsigned char* category = GetCategoryGroupEnabled(
1247 category_name.c_str());
1248 size_t notify_count = 0;
1250 AutoLock lock(lock_);
1251 watch_category_ = category;
1252 watch_event_name_ = event_name;
1254 // First, search existing events for watch event because we want to catch
1255 // it even if it has already occurred.
1256 notify_count = logged_events_->CountEnabledByName(category, event_name);
1257 } // release lock
1259 // Send notification for each event found.
1260 for (size_t i = 0; i < notify_count; ++i) {
1261 NotificationHelper notifier(this);
1262 lock_.Acquire();
1263 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1264 lock_.Release();
1265 notifier.SendNotificationIfAny();
1269 void TraceLog::CancelWatchEvent() {
1270 AutoLock lock(lock_);
1271 watch_category_ = NULL;
1272 watch_event_name_ = "";
1275 void TraceLog::AddThreadNameMetadataEvents() {
1276 lock_.AssertAcquired();
1277 for(hash_map<int, std::string>::iterator it = thread_names_.begin();
1278 it != thread_names_.end();
1279 it++) {
1280 if (!it->second.empty()) {
1281 int num_args = 1;
1282 const char* arg_name = "name";
1283 unsigned char arg_type;
1284 unsigned long long arg_value;
1285 trace_event_internal::SetTraceValue(it->second, &arg_type, &arg_value);
1286 logged_events_->AddEvent(TraceEvent(it->first,
1287 TimeTicks(), TRACE_EVENT_PHASE_METADATA,
1288 &g_category_group_enabled[g_category_metadata],
1289 "thread_name", trace_event_internal::kNoEventId,
1290 num_args, &arg_name, &arg_type, &arg_value, NULL,
1291 TRACE_EVENT_FLAG_NONE));
1296 void TraceLog::InstallWaitableEventForSamplingTesting(
1297 WaitableEvent* waitable_event) {
1298 sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event);
1301 void TraceLog::DeleteForTesting() {
1302 DeleteTraceLogForTesting::Delete();
1305 void TraceLog::Resurrect() {
1306 StaticMemorySingletonTraits<TraceLog>::Resurrect();
1309 void TraceLog::SetProcessID(int process_id) {
1310 process_id_ = process_id;
1311 // Create a FNV hash from the process ID for XORing.
1312 // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
1313 unsigned long long offset_basis = 14695981039346656037ull;
1314 unsigned long long fnv_prime = 1099511628211ull;
1315 unsigned long long pid = static_cast<unsigned long long>(process_id_);
1316 process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
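// The hash above is a single xor-then-multiply round in the FNV style,
//   process_id_hash_ = (offset_basis ^ pid) * fnv_prime  (mod 2^64),
// computed once per process. AddTraceEventWithThreadIdAndTimestamp() XORs it
// into ids carrying TRACE_EVENT_FLAG_MANGLE_ID, so the same pointer-derived
// id is unlikely to collide across processes when traces are merged.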
1319 void TraceLog::SetTimeOffset(TimeDelta offset) {
1320 time_offset_ = offset;
1323 bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
1324 const std::string& str) {
1325 return str.empty() ||
1326 str.at(0) == ' ' ||
1327 str.at(str.length() - 1) == ' ';
1330 bool CategoryFilter::DoesCategoryGroupContainCategory(
1331 const char* category_group,
1332 const char* category) const {
1333 DCHECK(category);
1334 CStringTokenizer category_group_tokens(category_group,
1335 category_group + strlen(category_group), ",");
1336 while (category_group_tokens.GetNext()) {
1337 std::string category_group_token = category_group_tokens.token();
1338 // Don't allow empty tokens, nor tokens with leading or trailing space.
1339 DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
1340 category_group_token))
1341 << "Disallowed category string";
1342 if (MatchPattern(category_group_token.c_str(), category))
1343 return true;
1345 return false;
1348 CategoryFilter::CategoryFilter(const std::string& filter_string) {
1349 if (!filter_string.empty())
1350 Initialize(filter_string);
1351 else
1352 Initialize(CategoryFilter::kDefaultCategoryFilterString);
1355 CategoryFilter::CategoryFilter(const CategoryFilter& cf)
1356 : included_(cf.included_),
1357 disabled_(cf.disabled_),
1358 excluded_(cf.excluded_) {
1361 CategoryFilter::~CategoryFilter() {
1364 CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
1365 if (this == &rhs)
1366 return *this;
1368 included_ = rhs.included_;
1369 disabled_ = rhs.disabled_;
1370 excluded_ = rhs.excluded_;
1371 return *this;
1374 void CategoryFilter::Initialize(const std::string& filter_string) {
1375 // Tokenize list of categories, delimited by ','.
1376 StringTokenizer tokens(filter_string, ",");
1377 // Add each token to the appropriate list (included_,excluded_).
1378 while (tokens.GetNext()) {
1379 std::string category = tokens.token();
1380 // Ignore empty categories.
1381 if (category.empty())
1382 continue;
1383 // Excluded categories start with '-'.
1384 if (category.at(0) == '-') {
1385 // Remove '-' from category string.
1386 category = category.substr(1);
1387 excluded_.push_back(category);
1388 } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
1389 TRACE_DISABLED_BY_DEFAULT("")) == 0) {
1390 disabled_.push_back(category);
1391 } else {
1392 included_.push_back(category);
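// For example, assuming TRACE_DISABLED_BY_DEFAULT("") expands to the usual
// "disabled-by-default-" prefix,
// Initialize("cc,-webkit,disabled-by-default-memory") yields
//   included_ == { "cc" }
//   excluded_ == { "webkit" }   (leading '-' stripped)
//   disabled_ == { "disabled-by-default-memory" }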
1397 void CategoryFilter::WriteString(const StringList& values,
1398 std::string* out,
1399 bool included) const {
1400 bool prepend_comma = !out->empty();
1401 int token_cnt = 0;
1402 for (StringList::const_iterator ci = values.begin();
1403 ci != values.end(); ++ci) {
1404 if (token_cnt > 0 || prepend_comma)
1405 StringAppendF(out, ",");
1406 StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
1407 ++token_cnt;
1411 std::string CategoryFilter::ToString() const {
1412 std::string filter_string;
1413 WriteString(included_, &filter_string, true);
1414 WriteString(disabled_, &filter_string, true);
1415 WriteString(excluded_, &filter_string, false);
1416 return filter_string;
1419 bool CategoryFilter::IsCategoryGroupEnabled(
1420 const char* category_group_name) const {
1421 // TraceLog should call this method only as part of enabling/disabling
1422 // categories.
1423 StringList::const_iterator ci;
1425 // Check the disabled- filters and the disabled-* wildcard first so that a
1426 // "*" filter does not include the disabled.
1427 for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
1428 if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
1429 return true;
1431 if (DoesCategoryGroupContainCategory(category_group_name,
1432 TRACE_DISABLED_BY_DEFAULT("*")))
1433 return false;
1435 for (ci = included_.begin(); ci != included_.end(); ++ci) {
1436 if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
1437 return true;
1440 for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
1441 if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
1442 return false;
1444 // If the category group is not excluded, and there are no included patterns,
1445 // we consider this category group enabled.
1446 return included_.empty();
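// Putting the rules above together for the filter "-webkit": "cc" is enabled
// (nothing excludes it and included_ is empty), "webkit" is disabled by the
// excluded_ match, and any disabled-by-default group stays off unless one of
// the disabled_ patterns names it explicitly.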
1449 bool CategoryFilter::HasIncludedPatterns() const {
1450 return !included_.empty();
1453 void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
1454 // Keep included patterns only if both filters have an included entry.
1455 // Otherwise, one of the filters was specifying "*" and we want to honour the
1456 // broadest filter.
1457 if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
1458 included_.insert(included_.end(),
1459 nested_filter.included_.begin(),
1460 nested_filter.included_.end());
1461 } else {
1462 included_.clear();
1465 disabled_.insert(disabled_.end(),
1466 nested_filter.disabled_.begin(),
1467 nested_filter.disabled_.end());
1468 excluded_.insert(excluded_.end(),
1469 nested_filter.excluded_.begin(),
1470 nested_filter.excluded_.end());
1473 void CategoryFilter::Clear() {
1474 included_.clear();
1475 disabled_.clear();
1476 excluded_.clear();
1479 } // namespace debug
1480 } // namespace base
1482 namespace trace_event_internal {
1484 ScopedTrace::ScopedTrace(
1485 TRACE_EVENT_API_ATOMIC_WORD* event_uid, const char* name) {
1486 category_group_enabled_ =
1487 reinterpret_cast<const unsigned char*>(TRACE_EVENT_API_ATOMIC_LOAD(
1488 *event_uid));
1489 if (!category_group_enabled_) {
1490 category_group_enabled_ = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("gpu");
1491 TRACE_EVENT_API_ATOMIC_STORE(
1492 *event_uid,
1493 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(category_group_enabled_));
1495 if (*category_group_enabled_) {
1496 name_ = name;
1497 TRACE_EVENT_API_ADD_TRACE_EVENT(
1498 TRACE_EVENT_PHASE_BEGIN, // phase
1499 category_group_enabled_, // category enabled
1500 name, // name
1501 0, // id
1502 0, // num_args
1503 NULL, // arg_names
1504 NULL, // arg_types
1505 NULL, // arg_values
1506 NULL, // convertable_values
1507 TRACE_EVENT_FLAG_NONE); // flags
1508 } else {
1509 category_group_enabled_ = NULL;
1513 ScopedTrace::~ScopedTrace() {
1514 if (category_group_enabled_ && *category_group_enabled_) {
1515 TRACE_EVENT_API_ADD_TRACE_EVENT(
1516 TRACE_EVENT_PHASE_END, // phase
1517 category_group_enabled_, // category enabled
1518 name_, // name
1519 0, // id
1520 0, // num_args
1521 NULL, // arg_names
1522 NULL, // arg_types
1523 NULL, // arg_values
1524 NULL, // convertable values
1525 TRACE_EVENT_FLAG_NONE); // flags
1529 } // namespace trace_event_internal