// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/stats.h"

#include "base/format_macros.h"
#include "base/logging.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/sample_vector.h"
#include "base/metrics/statistics_recorder.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"

namespace {

const int32 kDiskSignature = 0xF01427E0;
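
// On-disk layout of the cache's stats record. SerializeStats() writes this
// struct verbatim, so it has to fit in the two 256-byte blocks reserved by
// StorageSize(); the static_assert below enforces that.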
struct OnDiskStats {
  int32 signature;
  int size;
  int data_sizes[disk_cache::Stats::kDataSizesLength];
  int64 counters[disk_cache::Stats::MAX_COUNTER];
};
static_assert(sizeof(OnDiskStats) < 512, "needs more than 2 blocks");

// Returns the "floor" (as opposed to "ceiling") of log base 2 of number.
int LogBase2(int32 number) {
  unsigned int value = static_cast<unsigned int>(number);
  const unsigned int mask[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
  const unsigned int s[] = {1, 2, 4, 8, 16};
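
  // Binary search for the position of the highest set bit: each pass checks
  // whether any bit in the upper half of the remaining range is set (mask[i]);
  // if so, it shifts the value down by s[i] bits and adds s[i] to the result.
  // For example, LogBase2(6000) returns 12, since 4096 <= 6000 < 8192.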
  unsigned int result = 0;
  for (int i = 4; i >= 0; i--) {
    if (value & mask[i]) {
      value >>= s[i];
      result |= s[i];
    }
  }
  return static_cast<int>(result);
}

// WARNING: Add new stats only at the end, or change LoadStats().
const char* const kCounterNames[] = {
  "Open miss",
  "Open hit",
  "Create miss",
  "Create hit",
  "Resurrect hit",
  "Create error",
  "Trim entry",
  "Doom entry",
  "Doom cache",
  "Invalid entry",
  "Open entries",
  "Max entries",
  "Timer",
  "Read data",
  "Write data",
  "Open rankings",
  "Get rankings",
  "Fatal error",
  "Last report",
  "Last report timer",
  "Doom recent entries",
  "unused"
};
static_assert(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
              "update the names");

}  // namespace

namespace disk_cache {
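
// Sanity-checks a stats record read from disk. A record that claims to be
// larger than the current OnDiskStats is wiped clean, while a smaller (older)
// record gets its missing tail zero-filled so newer counters start at zero.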
bool VerifyStats(OnDiskStats* stats) {
  if (stats->signature != kDiskSignature)
    return false;

  // We don't want to discard the whole cache every time we have one extra
  // counter; we keep old data if we can.
  if (static_cast<unsigned int>(stats->size) > sizeof(*stats)) {
    memset(stats, 0, sizeof(*stats));
    stats->signature = kDiskSignature;
  } else if (static_cast<unsigned int>(stats->size) != sizeof(*stats)) {
    size_t delta = sizeof(*stats) - static_cast<unsigned int>(stats->size);
    memset(reinterpret_cast<char*>(stats) + stats->size, 0, delta);
    stats->size = sizeof(*stats);
  }

  return true;
}

Stats::Stats() {
}

Stats::~Stats() {
}
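
// Loads the stats from |data| (|num_bytes| bytes of the stats block stored at
// |address|). A missing or all-zero record re-initializes the counters, while
// a record that otherwise fails verification makes Init() fail.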
bool Stats::Init(void* data, int num_bytes, Addr address) {
  OnDiskStats local_stats;
  OnDiskStats* stats = &local_stats;
  if (!num_bytes) {
    memset(stats, 0, sizeof(local_stats));
    local_stats.signature = kDiskSignature;
    local_stats.size = sizeof(local_stats);
  } else if (num_bytes >= static_cast<int>(sizeof(*stats))) {
    stats = reinterpret_cast<OnDiskStats*>(data);
    if (!VerifyStats(stats)) {
      memset(&local_stats, 0, sizeof(local_stats));
      if (memcmp(stats, &local_stats, sizeof(local_stats))) {
        return false;
      } else {
        // The storage is empty which means that SerializeStats() was never
        // called on the last run. Just re-initialize everything.
        local_stats.signature = kDiskSignature;
        local_stats.size = sizeof(local_stats);
        stats = &local_stats;
      }
    }
  } else {
    return false;
  }

  storage_addr_ = address;

  memcpy(data_sizes_, stats->data_sizes, sizeof(data_sizes_));
  memcpy(counters_, stats->counters, sizeof(counters_));

  // Clean up old value.
  SetCounter(UNUSED, 0);
  return true;
}
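
// Seeds the "DiskCache.SizeStats2" UMA histogram with the current entry-size
// distribution: each bucket contributes data_sizes_[i] samples at
// GetBucketRange(i) / 1024 (its lower bound in KB). Done at most once per
// process.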
void Stats::InitSizeHistogram() {
  // Only generate this histogram for the main cache.
  static bool first_time = true;
  if (!first_time)
    return;

  first_time = false;
  int min = 1;
  int max = 64 * 1024;
  int num_buckets = 75;
  base::BucketRanges ranges(num_buckets + 1);
  base::Histogram::InitializeBucketRanges(min, max, &ranges);

  base::HistogramBase* stats_histogram = base::Histogram::FactoryGet(
      "DiskCache.SizeStats2", min, max, num_buckets,
      base::HistogramBase::kUmaTargetedHistogramFlag);

  base::SampleVector samples(&ranges);
  for (int i = 0; i < kDataSizesLength; i++) {
    // This is a good time to fix any inconsistent data. The count should be
    // always positive, but if it's not, reset the value now.
    if (data_sizes_[i] < 0)
      data_sizes_[i] = 0;

    samples.Accumulate(GetBucketRange(i) / 1024, data_sizes_[i]);
  }
  stats_histogram->AddSamples(samples);
}

int Stats::StorageSize() {
  // If we have more than 512 bytes of counters, change kDiskSignature so we
  // don't overwrite something else (LoadStats must fail).
  static_assert(sizeof(OnDiskStats) <= 256 * 2, "use more blocks");
  return 256 * 2;
}

void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
  // We keep a counter of the data block size on an array where each entry is
  // the adjusted log base 2 of the size. The first entry counts entries of
  // less than 1 KB and the last one counts entries of 64 MB or more (see the
  // table before GetStatsBucket()).
  int new_index = GetStatsBucket(new_size);
  int old_index = GetStatsBucket(old_size);

  if (new_size)
    data_sizes_[new_index]++;

  if (old_size)
    data_sizes_[old_index]--;
}

void Stats::OnEvent(Counters an_event) {
  DCHECK(an_event >= MIN_COUNTER && an_event < MAX_COUNTER);
  counters_[an_event]++;
}

void Stats::SetCounter(Counters counter, int64 value) {
  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
  counters_[counter] = value;
}

int64 Stats::GetCounter(Counters counter) const {
  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
  return counters_[counter];
}
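
// Exports every size bucket and counter as a (name, hex value) string pair.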
void Stats::GetItems(StatsItems* items) {
  std::pair<std::string, std::string> item;
  for (int i = 0; i < kDataSizesLength; i++) {
    item.first = base::StringPrintf("Size%02d", i);
    item.second = base::StringPrintf("0x%08x", data_sizes_[i]);
    items->push_back(item);
  }

  for (int i = MIN_COUNTER; i < MAX_COUNTER; i++) {
    item.first = kCounterNames[i];
    item.second = base::StringPrintf("0x%" PRIx64, counters_[i]);
    items->push_back(item);
  }
}

int Stats::GetHitRatio() const {
  return GetRatio(OPEN_HIT, OPEN_MISS);
}

int Stats::GetResurrectRatio() const {
  return GetRatio(RESURRECT_HIT, CREATE_HIT);
}

void Stats::ResetRatios() {
  SetCounter(OPEN_HIT, 0);
  SetCounter(OPEN_MISS, 0);
  SetCounter(RESURRECT_HIT, 0);
  SetCounter(CREATE_HIT, 0);
}

int Stats::GetLargeEntriesSize() {
  int total = 0;
  // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
  // GetStatsBucket()).
  for (int bucket = 20; bucket < kDataSizesLength; bucket++)
    total += data_sizes_[bucket] * GetBucketRange(bucket);

  return total;
}

int Stats::SerializeStats(void* data, int num_bytes, Addr* address) {
  OnDiskStats* stats = reinterpret_cast<OnDiskStats*>(data);
  if (num_bytes < static_cast<int>(sizeof(*stats)))
    return 0;

  stats->signature = kDiskSignature;
  stats->size = sizeof(*stats);
  memcpy(stats->data_sizes, data_sizes_, sizeof(data_sizes_));
  memcpy(stats->counters, counters_, sizeof(counters_));

  *address = storage_addr_;
  return sizeof(*stats);
}
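
// Returns the lower bound, in bytes, of size bucket |i|; this is the inverse
// of the mapping performed by GetStatsBucket() (see the table before it).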
int Stats::GetBucketRange(size_t i) const {
  if (i < 2)
    return static_cast<int>(1024 * i);

  if (i < 12)
    return static_cast<int>(2048 * (i - 1));

  if (i < 17)
    return static_cast<int>(4096 * (i - 11)) + 20 * 1024;

  int n = 64 * 1024;
  if (i > static_cast<size_t>(kDataSizesLength)) {
    NOTREACHED();
    i = kDataSizesLength;
  }

  i -= 17;
  n <<= i;
  return n;
}

// The array will be filled this way:
//  index      size
//    0        [0, 1024)
//    1        [1024, 2048)
//    2        [2048, 4096)
//    3        [4K, 6K)
//      ...
//    10       [18K, 20K)
//    11       [20K, 24K)
//    12       [24K, 28K)
//      ...
//    15       [36K, 40K)
//    16       [40K, 64K)
//    17       [64K, 128K)
//    18       [128K, 256K)
//      ...
//    23       [4M, 8M)
//    24       [8M, 16M)
//    25       [16M, 32M)
//    26       [32M, 64M)
//    27       [64M, ...)
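// For example, GetStatsBucket(25000) returns 12 ([24K, 28K)) and
// GetStatsBucket(100000) returns LogBase2(100000) + 1 = 17 ([64K, 128K)).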
int Stats::GetStatsBucket(int32 size) {
  if (size < 1024)
    return 0;

  // 10 slots more, until 20K.
  if (size < 20 * 1024)
    return size / 2048 + 1;

  // 5 slots more, from 20K to 40K.
  if (size < 40 * 1024)
    return (size - 20 * 1024) / 4096 + 11;

  // From this point on, use a logarithmic scale.
  int result = LogBase2(size) + 1;

  static_assert(kDataSizesLength > 16, "update the scale");
  if (result >= kDataSizesLength)
    result = kDataSizesLength - 1;

  return result;
}
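
// Returns |hit| as a percentage of (|hit| + |miss|), or 0 when there have
// been no hits at all.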
int Stats::GetRatio(Counters hit, Counters miss) const {
  int64 ratio = GetCounter(hit) * 100;
  if (!ratio)
    return 0;

  ratio /= (GetCounter(hit) + GetCounter(miss));
  return static_cast<int>(ratio);
}

}  // namespace disk_cache