Supervised user import: Listen for profile creation/deletion
[chromium-blink-merge.git] / base / metrics / statistics_recorder.cc
blob39ecc30a359986c3bb5567cef83f410a155f9f8a
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/metrics/statistics_recorder.h"
7 #include "base/at_exit.h"
8 #include "base/debug/leak_annotations.h"
9 #include "base/json/string_escape.h"
10 #include "base/logging.h"
11 #include "base/memory/scoped_ptr.h"
12 #include "base/metrics/histogram.h"
13 #include "base/strings/stringprintf.h"
14 #include "base/synchronization/lock.h"
15 #include "base/values.h"
17 using std::list;
18 using std::string;
namespace {
// Initialize histogram statistics gathering system.
// Leaky: the recorder singleton is never destroyed at process exit, so the
// leak checker must not flag it. Construction (see StatisticsRecorder's
// constructor below) allocates the global histogram/ranges maps and lock.
base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
    LAZY_INSTANCE_INITIALIZER;
} // namespace
26 namespace base {
28 // static
29 void StatisticsRecorder::Initialize() {
30 // Ensure that an instance of the StatisticsRecorder object is created.
31 g_statistics_recorder_.Get();
34 // static
35 bool StatisticsRecorder::IsActive() {
36 if (lock_ == NULL)
37 return false;
38 base::AutoLock auto_lock(*lock_);
39 return NULL != histograms_;
42 // static
43 HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
44 HistogramBase* histogram) {
45 // As per crbug.com/79322 the histograms are intentionally leaked, so we need
46 // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
47 // for an object, the duplicates should not be annotated.
48 // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
49 // twice if (lock_ == NULL) || (!histograms_).
50 if (lock_ == NULL) {
51 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
52 return histogram;
55 HistogramBase* histogram_to_delete = NULL;
56 HistogramBase* histogram_to_return = NULL;
58 base::AutoLock auto_lock(*lock_);
59 if (histograms_ == NULL) {
60 histogram_to_return = histogram;
61 } else {
62 const string& name = histogram->histogram_name();
63 HistogramMap::iterator it = histograms_->find(name);
64 if (histograms_->end() == it) {
65 (*histograms_)[name] = histogram;
66 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
67 histogram_to_return = histogram;
68 } else if (histogram == it->second) {
69 // The histogram was registered before.
70 histogram_to_return = histogram;
71 } else {
72 // We already have one histogram with this name.
73 histogram_to_return = it->second;
74 histogram_to_delete = histogram;
78 delete histogram_to_delete;
79 return histogram_to_return;
82 // static
83 const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
84 const BucketRanges* ranges) {
85 DCHECK(ranges->HasValidChecksum());
86 scoped_ptr<const BucketRanges> ranges_deleter;
88 if (lock_ == NULL) {
89 ANNOTATE_LEAKING_OBJECT_PTR(ranges);
90 return ranges;
93 base::AutoLock auto_lock(*lock_);
94 if (ranges_ == NULL) {
95 ANNOTATE_LEAKING_OBJECT_PTR(ranges);
96 return ranges;
99 list<const BucketRanges*>* checksum_matching_list;
100 RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
101 if (ranges_->end() == ranges_it) {
102 // Add a new matching list to map.
103 checksum_matching_list = new list<const BucketRanges*>();
104 ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
105 (*ranges_)[ranges->checksum()] = checksum_matching_list;
106 } else {
107 checksum_matching_list = ranges_it->second;
110 list<const BucketRanges*>::iterator checksum_matching_list_it;
111 for (checksum_matching_list_it = checksum_matching_list->begin();
112 checksum_matching_list_it != checksum_matching_list->end();
113 ++checksum_matching_list_it) {
114 const BucketRanges* existing_ranges = *checksum_matching_list_it;
115 if (existing_ranges->Equals(ranges)) {
116 if (existing_ranges == ranges) {
117 return ranges;
118 } else {
119 ranges_deleter.reset(ranges);
120 return existing_ranges;
124 // We haven't found a BucketRanges which has the same ranges. Register the
125 // new BucketRanges.
126 checksum_matching_list->push_front(ranges);
127 return ranges;
130 // static
131 void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
132 std::string* output) {
133 if (!IsActive())
134 return;
136 Histograms snapshot;
137 GetSnapshot(query, &snapshot);
138 for (Histograms::iterator it = snapshot.begin();
139 it != snapshot.end();
140 ++it) {
141 (*it)->WriteHTMLGraph(output);
142 output->append("<br><hr><br>");
146 // static
147 void StatisticsRecorder::WriteGraph(const std::string& query,
148 std::string* output) {
149 if (!IsActive())
150 return;
151 if (query.length())
152 StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
153 else
154 output->append("Collections of all histograms\n");
156 Histograms snapshot;
157 GetSnapshot(query, &snapshot);
158 for (Histograms::iterator it = snapshot.begin();
159 it != snapshot.end();
160 ++it) {
161 (*it)->WriteAscii(output);
162 output->append("\n");
166 // static
167 std::string StatisticsRecorder::ToJSON(const std::string& query) {
168 if (!IsActive())
169 return std::string();
171 std::string output("{");
172 if (!query.empty()) {
173 output += "\"query\":";
174 EscapeJSONString(query, true, &output);
175 output += ",";
178 Histograms snapshot;
179 GetSnapshot(query, &snapshot);
180 output += "\"histograms\":[";
181 bool first_histogram = true;
182 for (Histograms::const_iterator it = snapshot.begin(); it != snapshot.end();
183 ++it) {
184 if (first_histogram)
185 first_histogram = false;
186 else
187 output += ",";
188 std::string json;
189 (*it)->WriteJSON(&json);
190 output += json;
192 output += "]}";
193 return output;
196 // static
197 void StatisticsRecorder::GetHistograms(Histograms* output) {
198 if (lock_ == NULL)
199 return;
200 base::AutoLock auto_lock(*lock_);
201 if (histograms_ == NULL)
202 return;
204 for (HistogramMap::iterator it = histograms_->begin();
205 histograms_->end() != it;
206 ++it) {
207 DCHECK_EQ(it->first, it->second->histogram_name());
208 output->push_back(it->second);
212 // static
213 void StatisticsRecorder::GetBucketRanges(
214 std::vector<const BucketRanges*>* output) {
215 if (lock_ == NULL)
216 return;
217 base::AutoLock auto_lock(*lock_);
218 if (ranges_ == NULL)
219 return;
221 for (RangesMap::iterator it = ranges_->begin();
222 ranges_->end() != it;
223 ++it) {
224 list<const BucketRanges*>* ranges_list = it->second;
225 list<const BucketRanges*>::iterator ranges_list_it;
226 for (ranges_list_it = ranges_list->begin();
227 ranges_list_it != ranges_list->end();
228 ++ranges_list_it) {
229 output->push_back(*ranges_list_it);
234 // static
235 HistogramBase* StatisticsRecorder::FindHistogram(const std::string& name) {
236 if (lock_ == NULL)
237 return NULL;
238 base::AutoLock auto_lock(*lock_);
239 if (histograms_ == NULL)
240 return NULL;
242 HistogramMap::iterator it = histograms_->find(name);
243 if (histograms_->end() == it)
244 return NULL;
245 return it->second;
248 // private static
249 void StatisticsRecorder::GetSnapshot(const std::string& query,
250 Histograms* snapshot) {
251 if (lock_ == NULL)
252 return;
253 base::AutoLock auto_lock(*lock_);
254 if (histograms_ == NULL)
255 return;
257 for (HistogramMap::iterator it = histograms_->begin();
258 histograms_->end() != it;
259 ++it) {
260 if (it->first.find(query) != std::string::npos)
261 snapshot->push_back(it->second);
265 // This singleton instance should be started during the single threaded portion
266 // of main(), and hence it is not thread safe. It initializes globals to
267 // provide support for all future calls.
268 StatisticsRecorder::StatisticsRecorder() {
269 DCHECK(!histograms_);
270 if (lock_ == NULL) {
271 // This will leak on purpose. It's the only way to make sure we won't race
272 // against the static uninitialization of the module while one of our
273 // static methods relying on the lock get called at an inappropriate time
274 // during the termination phase. Since it's a static data member, we will
275 // leak one per process, which would be similar to the instance allocated
276 // during static initialization and released only on process termination.
277 lock_ = new base::Lock;
279 base::AutoLock auto_lock(*lock_);
280 histograms_ = new HistogramMap;
281 ranges_ = new RangesMap;
283 if (VLOG_IS_ON(1))
284 AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
287 // static
288 void StatisticsRecorder::DumpHistogramsToVlog(void* instance) {
289 DCHECK(VLOG_IS_ON(1));
291 string output;
292 StatisticsRecorder::WriteGraph(std::string(), &output);
293 VLOG(1) << output;
296 StatisticsRecorder::~StatisticsRecorder() {
297 DCHECK(histograms_ && ranges_ && lock_);
299 // Clean up.
300 scoped_ptr<HistogramMap> histograms_deleter;
301 scoped_ptr<RangesMap> ranges_deleter;
302 // We don't delete lock_ on purpose to avoid having to properly protect
303 // against it going away after we checked for NULL in the static methods.
305 base::AutoLock auto_lock(*lock_);
306 histograms_deleter.reset(histograms_);
307 ranges_deleter.reset(ranges_);
308 histograms_ = NULL;
309 ranges_ = NULL;
311 // We are going to leak the histograms and the ranges.
// Global recorder state: all NULL until the singleton constructor above runs.
// lock_ guards histograms_ and ranges_; it is allocated once and never freed.
// static
StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
// static
StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
// static
base::Lock* StatisticsRecorder::lock_ = NULL;
322 } // namespace base