base/metrics/statistics_recorder.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/statistics_recorder.h"

#include "base/at_exit.h"
#include "base/debug/leak_annotations.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/values.h"
namespace {

// Initialize histogram statistics gathering system.
base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
    LAZY_INSTANCE_INITIALIZER;

}  // namespace

namespace base {

// static
void StatisticsRecorder::Initialize() {
  // Ensure that an instance of the StatisticsRecorder object is created.
  g_statistics_recorder_.Get();
}
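
// Usage sketch (illustrative, not from this file): an embedder typically
// creates the recorder once, early in the single-threaded part of startup,
// before any histogram macros run. Something along these lines:
//
//   int main(int argc, char** argv) {
//     base::AtExitManager exit_manager;
//     base::StatisticsRecorder::Initialize();
//     DCHECK(base::StatisticsRecorder::IsActive());
//     // UMA_HISTOGRAM_* macros can now register their histograms globally.
//   }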
// static
bool StatisticsRecorder::IsActive() {
  if (lock_ == NULL)
    return false;
  base::AutoLock auto_lock(*lock_);
  return NULL != histograms_;
}
// static
HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
    HistogramBase* histogram) {
  // As per crbug.com/79322 the histograms are intentionally leaked, so we need
  // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only
  // once for an object, the duplicates should not be annotated.
  // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
  // twice if (lock_ == NULL) || (!histograms_).
  if (lock_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
    return histogram;
  }

  HistogramBase* histogram_to_delete = NULL;
  HistogramBase* histogram_to_return = NULL;
  {
    base::AutoLock auto_lock(*lock_);
    if (histograms_ == NULL) {
      histogram_to_return = histogram;
    } else {
      const std::string& name = histogram->histogram_name();
      HistogramMap::iterator it = histograms_->find(HistogramNameRef(name));
      if (histograms_->end() == it) {
        (*histograms_)[HistogramNameRef(name)] = histogram;
        ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
        // If there are callbacks for this histogram, we set the
        // kCallbackExists flag.
        auto callback_iterator = callbacks_->find(name);
        if (callback_iterator != callbacks_->end()) {
          if (!callback_iterator->second.is_null())
            histogram->SetFlags(HistogramBase::kCallbackExists);
          else
            histogram->ClearFlags(HistogramBase::kCallbackExists);
        }
        histogram_to_return = histogram;
      } else if (histogram == it->second) {
        // The histogram was registered before.
        histogram_to_return = histogram;
      } else {
        // We already have one histogram with this name.
        histogram_to_return = it->second;
        histogram_to_delete = histogram;
      }
    }
  }
  delete histogram_to_delete;
  return histogram_to_return;
}
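
// Registration sketch (illustrative): histogram factory methods such as
// Histogram::FactoryGet() are expected to construct a candidate histogram and
// then funnel it through RegisterOrDeleteDuplicate(), keeping whichever
// pointer comes back:
//
//   HistogramBase* tentative = /* newly constructed histogram */;
//   HistogramBase* histogram =
//       StatisticsRecorder::RegisterOrDeleteDuplicate(tentative);
//   // |histogram| may be a previously registered instance with the same
//   // name; in that case |tentative| was deleted and must not be used again.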
// static
const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
    const BucketRanges* ranges) {
  DCHECK(ranges->HasValidChecksum());
  scoped_ptr<const BucketRanges> ranges_deleter;

  if (lock_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  base::AutoLock auto_lock(*lock_);
  if (ranges_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  std::list<const BucketRanges*>* checksum_matching_list;
  RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
  if (ranges_->end() == ranges_it) {
    // Add a new matching list to map.
    checksum_matching_list = new std::list<const BucketRanges*>();
    ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
    (*ranges_)[ranges->checksum()] = checksum_matching_list;
  } else {
    checksum_matching_list = ranges_it->second;
  }

  for (const BucketRanges* existing_ranges : *checksum_matching_list) {
    if (existing_ranges->Equals(ranges)) {
      if (existing_ranges == ranges) {
        return ranges;
      } else {
        ranges_deleter.reset(ranges);
        return existing_ranges;
      }
    }
  }

  // We haven't found a BucketRanges which has the same ranges. Register the
  // new BucketRanges.
  checksum_matching_list->push_front(ranges);
  return ranges;
}
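
// Usage sketch (illustrative): histogram factories typically build a
// BucketRanges, register it, and keep only the returned pointer, roughly:
//
//   BucketRanges* ranges = new BucketRanges(bucket_count + 1);
//   // ... fill in bucket boundaries and the checksum ...
//   const BucketRanges* registered =
//       StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
//   // Use |registered| from here on; if an equal BucketRanges already
//   // existed, |ranges| has been deleted.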
// static
void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
                                        std::string* output) {
  if (!IsActive())
    return;

  Histograms snapshot;
  GetSnapshot(query, &snapshot);
  for (const HistogramBase* histogram : snapshot) {
    histogram->WriteHTMLGraph(output);
    output->append("<br><hr><br>");
  }
}

// static
void StatisticsRecorder::WriteGraph(const std::string& query,
                                    std::string* output) {
  if (!IsActive())
    return;
  if (query.length())
    StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
  else
    output->append("Collections of all histograms\n");

  Histograms snapshot;
  GetSnapshot(query, &snapshot);
  for (const HistogramBase* histogram : snapshot) {
    histogram->WriteAscii(output);
    output->append("\n");
  }
}
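
// Usage sketch (illustrative): a debug page or log dump can render every
// registered histogram, or only those whose names contain a substring
// ("Memory." below is just an example query):
//
//   std::string ascii;
//   base::StatisticsRecorder::WriteGraph(std::string(), &ascii);  // all
//   base::StatisticsRecorder::WriteGraph("Memory.", &ascii);      // filtered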
// static
std::string StatisticsRecorder::ToJSON(const std::string& query) {
  if (!IsActive())
    return std::string();

  std::string output("{");
  if (!query.empty()) {
    output += "\"query\":";
    EscapeJSONString(query, true, &output);
    output += ",";
  }

  Histograms snapshot;
  GetSnapshot(query, &snapshot);
  output += "\"histograms\":[";
  bool first_histogram = true;
  for (const HistogramBase* histogram : snapshot) {
    if (first_histogram)
      first_histogram = false;
    else
      output += ",";
    std::string json;
    histogram->WriteJSON(&json);
    output += json;
  }
  output += "]}";
  return output;
}
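
// Usage sketch (illustrative): the JSON form is convenient for exporting
// metrics to test harnesses or debug endpoints:
//
//   std::string json = base::StatisticsRecorder::ToJSON(std::string());
//   // With an empty query, |json| has the shape {"histograms":[{...},...]}.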
// static
void StatisticsRecorder::GetHistograms(Histograms* output) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return;

  for (const auto& entry : *histograms_) {
    DCHECK_EQ(entry.first.name_, entry.second->histogram_name());
    output->push_back(entry.second);
  }
}

// static
void StatisticsRecorder::GetBucketRanges(
    std::vector<const BucketRanges*>* output) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (ranges_ == NULL)
    return;

  for (const auto& entry : *ranges_) {
    for (const auto& range_entry : *entry.second) {
      output->push_back(range_entry);
    }
  }
}

// static
HistogramBase* StatisticsRecorder::FindHistogram(const std::string& name) {
  if (lock_ == NULL)
    return NULL;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return NULL;

  HistogramMap::iterator it = histograms_->find(HistogramNameRef(name));
  if (histograms_->end() == it)
    return NULL;
  return it->second;
}
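
// Usage sketch (illustrative): tests and metrics plumbing often look up a
// histogram by its full name before inspecting its samples ("MyMetric.Latency"
// is a made-up name):
//
//   base::HistogramBase* h =
//       base::StatisticsRecorder::FindHistogram("MyMetric.Latency");
//   if (h) {
//     // e.g. snapshot the samples via h->SnapshotSamples().
//   }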
// static
bool StatisticsRecorder::SetCallback(
    const std::string& name,
    const StatisticsRecorder::OnSampleCallback& cb) {
  DCHECK(!cb.is_null());
  if (lock_ == NULL)
    return false;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return false;

  if (ContainsKey(*callbacks_, name))
    return false;
  callbacks_->insert(std::make_pair(name, cb));

  auto histogram_iterator = histograms_->find(HistogramNameRef(name));
  if (histogram_iterator != histograms_->end())
    histogram_iterator->second->SetFlags(HistogramBase::kCallbackExists);

  return true;
}

// static
void StatisticsRecorder::ClearCallback(const std::string& name) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return;

  callbacks_->erase(name);

  // We also clear the flag from the histogram (if it exists).
  auto histogram_iterator = histograms_->find(HistogramNameRef(name));
  if (histogram_iterator != histograms_->end())
    histogram_iterator->second->ClearFlags(HistogramBase::kCallbackExists);
}

// static
StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
    const std::string& name) {
  if (lock_ == NULL)
    return OnSampleCallback();
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return OnSampleCallback();

  auto callback_iterator = callbacks_->find(name);
  return callback_iterator != callbacks_->end() ? callback_iterator->second
                                                : OnSampleCallback();
}
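
// Usage sketch (illustrative): a sample callback lets an observer be notified
// when values are added to a named histogram. The exact callback signature is
// defined by StatisticsRecorder::OnSampleCallback in the header; the names
// below are made up:
//
//   void OnLatencySample(base::HistogramBase::Sample sample) { /* ... */ }
//
//   bool ok = base::StatisticsRecorder::SetCallback(
//       "MyMetric.Latency", base::Bind(&OnLatencySample));
//   // Later, to stop observing:
//   base::StatisticsRecorder::ClearCallback("MyMetric.Latency");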
// private static
void StatisticsRecorder::GetSnapshot(const std::string& query,
                                     Histograms* snapshot) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return;

  for (const auto& entry : *histograms_) {
    if (entry.first.name_.find(query) != std::string::npos)
      snapshot->push_back(entry.second);
  }
}

// This singleton instance should be started during the single-threaded portion
// of main(), and hence it is not thread safe. It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
  DCHECK(!histograms_);
  if (lock_ == NULL) {
    // This will leak on purpose. It's the only way to make sure we won't race
    // against the static uninitialization of the module while one of our
    // static methods relying on the lock gets called at an inappropriate time
    // during the termination phase. Since it's a static data member, we will
    // leak one per process, which would be similar to the instance allocated
    // during static initialization and released only on process termination.
    lock_ = new base::Lock;
  }
  base::AutoLock auto_lock(*lock_);
  histograms_ = new HistogramMap;
  callbacks_ = new CallbackMap;
  ranges_ = new RangesMap;

  if (VLOG_IS_ON(1))
    AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
}
// static
void StatisticsRecorder::DumpHistogramsToVlog(void* instance) {
  std::string output;
  StatisticsRecorder::WriteGraph(std::string(), &output);
  VLOG(1) << output;
}

StatisticsRecorder::~StatisticsRecorder() {
  DCHECK(histograms_ && ranges_ && lock_);

  // Clean up.
  scoped_ptr<HistogramMap> histograms_deleter;
  scoped_ptr<CallbackMap> callbacks_deleter;
  scoped_ptr<RangesMap> ranges_deleter;
  // We don't delete lock_ on purpose to avoid having to properly protect
  // against it going away after we checked for NULL in the static methods.
  {
    base::AutoLock auto_lock(*lock_);
    histograms_deleter.reset(histograms_);
    callbacks_deleter.reset(callbacks_);
    ranges_deleter.reset(ranges_);
    histograms_ = NULL;
    callbacks_ = NULL;
    ranges_ = NULL;
  }
  // We are going to leak the histograms and the ranges.
}

// static
StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
// static
StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = NULL;
// static
StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
// static
base::Lock* StatisticsRecorder::lock_ = NULL;

}  // namespace base