1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/blockfile/stats.h"
7 #include "base/format_macros.h"
8 #include "base/logging.h"
9 #include "base/metrics/bucket_ranges.h"
10 #include "base/metrics/histogram.h"
11 #include "base/metrics/histogram_samples.h"
12 #include "base/metrics/sample_vector.h"
13 #include "base/metrics/statistics_recorder.h"
14 #include "base/strings/string_util.h"
15 #include "base/strings/stringprintf.h"
19 const int32 kDiskSignature
= 0xF01427E0;
24 int data_sizes
[disk_cache::Stats::kDataSizesLength
];
25 int64 counters
[disk_cache::Stats::MAX_COUNTER
];
27 static_assert(sizeof(OnDiskStats
) < 512, "needs more than 2 blocks");
29 // Returns the "floor" (as opposed to "ceiling") of log base 2 of number.
30 int LogBase2(int32 number
) {
31 unsigned int value
= static_cast<unsigned int>(number
);
32 const unsigned int mask
[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
33 const unsigned int s
[] = {1, 2, 4, 8, 16};
35 unsigned int result
= 0;
36 for (int i
= 4; i
>= 0; i
--) {
37 if (value
& mask
[i
]) {
42 return static_cast<int>(result
);
45 // WARNING: Add new stats only at the end, or change LoadStats().
46 const char* const kCounterNames
[] = {
67 "Doom recent entries",
70 static_assert(arraysize(kCounterNames
) == disk_cache::Stats::MAX_COUNTER
,
75 namespace disk_cache
{
77 bool VerifyStats(OnDiskStats
* stats
) {
78 if (stats
->signature
!= kDiskSignature
)
81 // We don't want to discard the whole cache every time we have one extra
82 // counter; we keep old data if we can.
83 if (static_cast<unsigned int>(stats
->size
) > sizeof(*stats
)) {
84 memset(stats
, 0, sizeof(*stats
));
85 stats
->signature
= kDiskSignature
;
86 } else if (static_cast<unsigned int>(stats
->size
) != sizeof(*stats
)) {
87 size_t delta
= sizeof(*stats
) - static_cast<unsigned int>(stats
->size
);
88 memset(reinterpret_cast<char*>(stats
) + stats
->size
, 0, delta
);
89 stats
->size
= sizeof(*stats
);
101 bool Stats::Init(void* data
, int num_bytes
, Addr address
) {
102 OnDiskStats local_stats
;
103 OnDiskStats
* stats
= &local_stats
;
105 memset(stats
, 0, sizeof(local_stats
));
106 local_stats
.signature
= kDiskSignature
;
107 local_stats
.size
= sizeof(local_stats
);
108 } else if (num_bytes
>= static_cast<int>(sizeof(*stats
))) {
109 stats
= reinterpret_cast<OnDiskStats
*>(data
);
110 if (!VerifyStats(stats
)) {
111 memset(&local_stats
, 0, sizeof(local_stats
));
112 if (memcmp(stats
, &local_stats
, sizeof(local_stats
))) {
115 // The storage is empty which means that SerializeStats() was never
116 // called on the last run. Just re-initialize everything.
117 local_stats
.signature
= kDiskSignature
;
118 local_stats
.size
= sizeof(local_stats
);
119 stats
= &local_stats
;
126 storage_addr_
= address
;
128 memcpy(data_sizes_
, stats
->data_sizes
, sizeof(data_sizes_
));
129 memcpy(counters_
, stats
->counters
, sizeof(counters_
));
131 // Clean up old value.
132 SetCounter(UNUSED
, 0);
136 void Stats::InitSizeHistogram() {
137 // Only generate this histogram for the main cache.
138 static bool first_time
= true;
145 int num_buckets
= 75;
146 base::BucketRanges
ranges(num_buckets
+ 1);
147 base::Histogram::InitializeBucketRanges(min
, max
, &ranges
);
149 base::HistogramBase
* stats_histogram
= base::Histogram::FactoryGet(
150 "DiskCache.SizeStats2", min
, max
, num_buckets
,
151 base::HistogramBase::kUmaTargetedHistogramFlag
);
153 base::SampleVector
samples(&ranges
);
154 for (int i
= 0; i
< kDataSizesLength
; i
++) {
155 // This is a good time to fix any inconsistent data. The count should be
156 // always positive, but if it's not, reset the value now.
157 if (data_sizes_
[i
] < 0)
160 samples
.Accumulate(GetBucketRange(i
) / 1024, data_sizes_
[i
]);
162 stats_histogram
->AddSamples(samples
);
165 int Stats::StorageSize() {
166 // If we have more than 512 bytes of counters, change kDiskSignature so we
167 // don't overwrite something else (LoadStats must fail).
168 static_assert(sizeof(OnDiskStats
) <= 256 * 2, "use more blocks");
172 void Stats::ModifyStorageStats(int32 old_size
, int32 new_size
) {
173 // We keep a counter of the data block size on an array where each entry is
174 // the adjusted log base 2 of the size. The first entry counts blocks of 256
175 // bytes, the second blocks up to 512 bytes, etc. With 20 entries, the last
176 // one stores entries of more than 64 MB
177 int new_index
= GetStatsBucket(new_size
);
178 int old_index
= GetStatsBucket(old_size
);
181 data_sizes_
[new_index
]++;
184 data_sizes_
[old_index
]--;
187 void Stats::OnEvent(Counters an_event
) {
188 DCHECK(an_event
>= MIN_COUNTER
&& an_event
< MAX_COUNTER
);
189 counters_
[an_event
]++;
192 void Stats::SetCounter(Counters counter
, int64 value
) {
193 DCHECK(counter
>= MIN_COUNTER
&& counter
< MAX_COUNTER
);
194 counters_
[counter
] = value
;
197 int64
Stats::GetCounter(Counters counter
) const {
198 DCHECK(counter
>= MIN_COUNTER
&& counter
< MAX_COUNTER
);
199 return counters_
[counter
];
202 void Stats::GetItems(StatsItems
* items
) {
203 std::pair
<std::string
, std::string
> item
;
204 for (int i
= 0; i
< kDataSizesLength
; i
++) {
205 item
.first
= base::StringPrintf("Size%02d", i
);
206 item
.second
= base::StringPrintf("0x%08x", data_sizes_
[i
]);
207 items
->push_back(item
);
210 for (int i
= MIN_COUNTER
; i
< MAX_COUNTER
; i
++) {
211 item
.first
= kCounterNames
[i
];
212 item
.second
= base::StringPrintf("0x%" PRIx64
, counters_
[i
]);
213 items
->push_back(item
);
217 int Stats::GetHitRatio() const {
218 return GetRatio(OPEN_HIT
, OPEN_MISS
);
221 int Stats::GetResurrectRatio() const {
222 return GetRatio(RESURRECT_HIT
, CREATE_HIT
);
225 void Stats::ResetRatios() {
226 SetCounter(OPEN_HIT
, 0);
227 SetCounter(OPEN_MISS
, 0);
228 SetCounter(RESURRECT_HIT
, 0);
229 SetCounter(CREATE_HIT
, 0);
232 int Stats::GetLargeEntriesSize() {
234 // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
235 // GetStatsBucket()).
236 for (int bucket
= 20; bucket
< kDataSizesLength
; bucket
++)
237 total
+= data_sizes_
[bucket
] * GetBucketRange(bucket
);
242 int Stats::SerializeStats(void* data
, int num_bytes
, Addr
* address
) {
243 OnDiskStats
* stats
= reinterpret_cast<OnDiskStats
*>(data
);
244 if (num_bytes
< static_cast<int>(sizeof(*stats
)))
247 stats
->signature
= kDiskSignature
;
248 stats
->size
= sizeof(*stats
);
249 memcpy(stats
->data_sizes
, data_sizes_
, sizeof(data_sizes_
));
250 memcpy(stats
->counters
, counters_
, sizeof(counters_
));
252 *address
= storage_addr_
;
253 return sizeof(*stats
);
256 int Stats::GetBucketRange(size_t i
) const {
258 return static_cast<int>(1024 * i
);
261 return static_cast<int>(2048 * (i
- 1));
264 return static_cast<int>(4096 * (i
- 11)) + 20 * 1024;
267 if (i
> static_cast<size_t>(kDataSizesLength
)) {
269 i
= kDataSizesLength
;
// The array will be filled this way:
//  index      size
//    0       [0, 1024)
//    1    [1024, 2048)
//    2    [2048, 4096)
//    3      [4K, 6K)
//      ...
//   10     [18K, 20K)
//   11     [20K, 24K)
//   12     [24K, 28K)
//      ...
//   15     [36K, 40K)
//   16     [40K, 64K)
//   17     [64K, 128K)
//      ...
//   26     [32M, 64M)
//   27     [64M, ...)
298 int Stats::GetStatsBucket(int32 size
) {
302 // 10 slots more, until 20K.
303 if (size
< 20 * 1024)
304 return size
/ 2048 + 1;
306 // 5 slots more, from 20K to 40K.
307 if (size
< 40 * 1024)
308 return (size
- 20 * 1024) / 4096 + 11;
310 // From this point on, use a logarithmic scale.
311 int result
= LogBase2(size
) + 1;
313 static_assert(kDataSizesLength
> 16, "update the scale");
314 if (result
>= kDataSizesLength
)
315 result
= kDataSizesLength
- 1;
320 int Stats::GetRatio(Counters hit
, Counters miss
) const {
321 int64 ratio
= GetCounter(hit
) * 100;
325 ratio
/= (GetCounter(hit
) + GetCounter(miss
));
326 return static_cast<int>(ratio
);
329 } // namespace disk_cache