// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/disk_cache/blockfile/stats.h"

#include <string.h>

#include "base/format_macros.h"
#include "base/logging.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/sample_vector.h"
#include "base/metrics/statistics_recorder.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
namespace {

const int32 kDiskSignature = 0xF01427E0;

struct OnDiskStats {
  int32 signature;
  int size;
  int data_sizes[disk_cache::Stats::kDataSizesLength];
  int64 counters[disk_cache::Stats::MAX_COUNTER];
};

COMPILE_ASSERT(sizeof(OnDiskStats) < 512, needs_more_than_2_blocks);
// Returns the "floor" (as opposed to "ceiling") of log base 2 of number.
int LogBase2(int32 number) {
  unsigned int value = static_cast<unsigned int>(number);
  const unsigned int mask[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
  const unsigned int s[] = {1, 2, 4, 8, 16};

  unsigned int result = 0;
  for (int i = 4; i >= 0; i--) {
    if (value & mask[i]) {
      value >>= s[i];
      result |= s[i];
    }
  }
  return static_cast<int>(result);
}
// WARNING: Add new stats only at the end, or change LoadStats().
static const char* kCounterNames[] = {
  // ...
  "Doom recent entries",
  // ...
};
COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
               update_the_names);

}  // namespace

namespace disk_cache {
bool VerifyStats(OnDiskStats* stats) {
  if (stats->signature != kDiskSignature)
    return false;

  // We don't want to discard the whole cache every time we have one extra
  // counter; we keep old data if we can.
  if (static_cast<unsigned int>(stats->size) > sizeof(*stats)) {
    memset(stats, 0, sizeof(*stats));
    stats->signature = kDiskSignature;
  } else if (static_cast<unsigned int>(stats->size) != sizeof(*stats)) {
    size_t delta = sizeof(*stats) - static_cast<unsigned int>(stats->size);
    memset(reinterpret_cast<char*>(stats) + stats->size, 0, delta);
    stats->size = sizeof(*stats);
  }

  return true;
}

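// Loads the stats record from |data| when one is provided; with no data, the
// counters start from a zeroed record carrying the current signature.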
bool Stats::Init(void* data, int num_bytes, Addr address) {
  OnDiskStats local_stats;
  OnDiskStats* stats = &local_stats;
  if (!num_bytes) {
    memset(stats, 0, sizeof(local_stats));
    local_stats.signature = kDiskSignature;
    local_stats.size = sizeof(local_stats);
  } else if (num_bytes >= static_cast<int>(sizeof(*stats))) {
    stats = reinterpret_cast<OnDiskStats*>(data);
    if (!VerifyStats(stats))
      return false;
  } else {
    return false;
  }

  storage_addr_ = address;

  memcpy(data_sizes_, stats->data_sizes, sizeof(data_sizes_));
  memcpy(counters_, stats->counters, sizeof(counters_));

  // Clean up old value.
  SetCounter(UNUSED, 0);
  return true;
}

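// Mirrors data_sizes_ into a UMA histogram: each bucket's count is recorded
// at the bucket's lower bound, expressed in KB.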
void Stats::InitSizeHistogram() {
  // Only generate this histogram for the main cache.
  static bool first_time = true;
  if (!first_time)
    return;

  first_time = false;
  int min = 1;
  int max = 64 * 1024;
  int num_buckets = 75;
  base::BucketRanges ranges(num_buckets + 1);
  base::Histogram::InitializeBucketRanges(min, max, &ranges);

  base::HistogramBase* stats_histogram = base::Histogram::FactoryGet(
      "DiskCache.SizeStats2", min, max, num_buckets,
      base::HistogramBase::kUmaTargetedHistogramFlag);

  base::SampleVector samples(&ranges);
  for (int i = 0; i < kDataSizesLength; i++) {
    // This is a good time to fix any inconsistent data. The count should be
    // always positive, but if it's not, reset the value now.
    if (data_sizes_[i] < 0)
      data_sizes_[i] = 0;

    samples.Accumulate(GetBucketRange(i) / 1024, data_sizes_[i]);
  }

  stats_histogram->AddSamples(samples);
}

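// The serialized stats record occupies two 256-byte blocks on disk.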
int Stats::StorageSize() {
  // If we have more than 512 bytes of counters, change kDiskSignature so we
  // don't overwrite something else (LoadStats must fail).
  COMPILE_ASSERT(sizeof(OnDiskStats) <= 256 * 2, use_more_blocks);
  return 256 * 2;
}

void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
  // We keep a counter of the data block size in an array where each entry is
  // the adjusted log base 2 of the size. The first entry counts entries of
  // less than 1 KB, the second entries of up to 2 KB, and so on; the last
  // bucket stores entries of more than 64 MB.
  int new_index = GetStatsBucket(new_size);
  int old_index = GetStatsBucket(old_size);

  if (new_size)
    data_sizes_[new_index]++;

  if (old_size)
    data_sizes_[old_index]--;
}
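// For example, growing an entry from 3000 to 50000 bytes decrements
// data_sizes_[2] (the [2K, 4K) bucket) and increments data_sizes_[16]
// (the [40K, 64K) bucket).
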
void Stats::OnEvent(Counters an_event) {
  DCHECK(an_event >= MIN_COUNTER && an_event < MAX_COUNTER);
  counters_[an_event]++;
}

void Stats::SetCounter(Counters counter, int64 value) {
  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
  counters_[counter] = value;
}

int64 Stats::GetCounter(Counters counter) const {
  DCHECK(counter >= MIN_COUNTER && counter < MAX_COUNTER);
  return counters_[counter];
}

void Stats::GetItems(StatsItems* items) {
  std::pair<std::string, std::string> item;
  for (int i = 0; i < kDataSizesLength; i++) {
    item.first = base::StringPrintf("Size%02d", i);
    item.second = base::StringPrintf("0x%08x", data_sizes_[i]);
    items->push_back(item);
  }

  for (int i = MIN_COUNTER; i < MAX_COUNTER; i++) {
    item.first = kCounterNames[i];
    item.second = base::StringPrintf("0x%" PRIx64, counters_[i]);
    items->push_back(item);
  }
}

int Stats::GetHitRatio() const {
  return GetRatio(OPEN_HIT, OPEN_MISS);
}

int Stats::GetResurrectRatio() const {
  return GetRatio(RESURRECT_HIT, CREATE_HIT);
}

void Stats::ResetRatios() {
  SetCounter(OPEN_HIT, 0);
  SetCounter(OPEN_MISS, 0);
  SetCounter(RESURRECT_HIT, 0);
  SetCounter(CREATE_HIT, 0);
}

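// Approximates the number of bytes held by large entries (at least 512 KB),
// counting each bucket at its lower bound.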
int Stats::GetLargeEntriesSize() {
  int total = 0;
  // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
  // GetStatsBucket()).
  for (int bucket = 20; bucket < kDataSizesLength; bucket++)
    total += data_sizes_[bucket] * GetBucketRange(bucket);

  return total;
}

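// Writes the current record to |data| and reports the block address where it
// should be stored; returns the number of bytes written, or 0 if the buffer
// is too small.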
int Stats::SerializeStats(void* data, int num_bytes, Addr* address) {
  OnDiskStats* stats = reinterpret_cast<OnDiskStats*>(data);
  if (num_bytes < static_cast<int>(sizeof(*stats)))
    return 0;

  stats->signature = kDiskSignature;
  stats->size = sizeof(*stats);
  memcpy(stats->data_sizes, data_sizes_, sizeof(data_sizes_));
  memcpy(stats->counters, counters_, sizeof(counters_));

  *address = storage_addr_;
  return sizeof(*stats);
}

int Stats::GetBucketRange(size_t i) const {
  if (i < 2)
    return static_cast<int>(1024 * i);

  if (i < 12)
    return static_cast<int>(2048 * (i - 1));

  if (i < 17)
    return static_cast<int>(4096 * (i - 11)) + 20 * 1024;

  int n = 64 * 1024;
  if (i > static_cast<size_t>(kDataSizesLength)) {
    NOTREACHED();
    i = kDataSizesLength;
  }

  i -= 17;
  n <<= i;
  return n;
}
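// Example: GetBucketRange(2) == 2048, GetBucketRange(16) == 40 * 1024 and
// GetBucketRange(20) == 512 * 1024.
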
// The array will be filled this way:
//  index      size
//    0       [0, 1K)
//    1      [1K, 2K)
//    2      [2K, 4K)
//      ...
//   10     [18K, 20K)
//   11     [20K, 24K)
//      ...
//   15     [36K, 40K)
//   16     [40K, 64K)
//   17     [64K, 128K)
//      ...
//   20    [512K, 1M)
//      ... (doubling from there on; the last bucket takes anything larger)
int Stats::GetStatsBucket(int32 size) {
  if (size < 1024)
    return 0;

  // 10 slots more, until 20K.
  if (size < 20 * 1024)
    return size / 2048 + 1;

  // 5 slots more, from 20K to 40K.
  if (size < 40 * 1024)
    return (size - 20 * 1024) / 4096 + 11;

  // From this point on, use a logarithmic scale.
  int result = LogBase2(size) + 1;

  COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
  if (result >= kDataSizesLength)
    result = kDataSizesLength - 1;

  return result;
}
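// Example: GetStatsBucket(3000) == 2 (3000 / 2048 + 1) and
// GetStatsBucket(50000) == 16 (LogBase2(50000) + 1).
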
int Stats::GetRatio(Counters hit, Counters miss) const {
  int64 ratio = GetCounter(hit) * 100;
  if (!ratio)
    return 0;

  ratio /= (GetCounter(hit) + GetCounter(miss));
  return static_cast<int>(ratio);
}
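// Example: with 3 hits and 1 miss, GetRatio() returns 300 / 4 == 75 (percent).
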
}  // namespace disk_cache