1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/backend_impl.h"
8 #include "base/bind_helpers.h"
9 #include "base/file_util.h"
10 #include "base/files/file_path.h"
11 #include "base/hash.h"
12 #include "base/message_loop/message_loop.h"
13 #include "base/metrics/field_trial.h"
14 #include "base/metrics/histogram.h"
15 #include "base/metrics/stats_counters.h"
16 #include "base/rand_util.h"
17 #include "base/strings/string_util.h"
18 #include "base/strings/stringprintf.h"
19 #include "base/sys_info.h"
20 #include "base/threading/thread_restrictions.h"
21 #include "base/time/time.h"
22 #include "base/timer/timer.h"
23 #include "net/base/net_errors.h"
24 #include "net/disk_cache/cache_util.h"
25 #include "net/disk_cache/entry_impl.h"
26 #include "net/disk_cache/errors.h"
27 #include "net/disk_cache/experiments.h"
28 #include "net/disk_cache/file.h"
30 // This has to be defined before including histogram_macros.h from this file.
31 #define NET_DISK_CACHE_BACKEND_IMPL_CC_
32 #include "net/disk_cache/histogram_macros.h"
35 using base::TimeDelta
;
36 using base::TimeTicks
;
// Name of the index file, stored at the root of the cache directory.
const char* kIndexName = "index";

// Seems like ~240 MB correspond to less than 50k entries for 99% of the people.
// Note that the actual target is to keep the index table load factor under 55%
// for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
// Base length of the index table; valid table lengths are multiples of this.
const int kBaseTableLen = 64 * 1024;
// Default maximum cache size; also used as the allowed slack when validating
// the stored byte count against the configured maximum (see CheckIndex()).
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;
52 int DesiredIndexTableLen(int32 storage_size
) {
53 if (storage_size
<= k64kEntriesStore
)
55 if (storage_size
<= k64kEntriesStore
* 2)
56 return kBaseTableLen
* 2;
57 if (storage_size
<= k64kEntriesStore
* 4)
58 return kBaseTableLen
* 4;
59 if (storage_size
<= k64kEntriesStore
* 8)
60 return kBaseTableLen
* 8;
62 // The biggest storage_size for int32 requires a 4 MB table.
63 return kBaseTableLen
* 16;
66 int MaxStorageSizeForTable(int table_len
) {
67 return table_len
* (k64kEntriesStore
/ kBaseTableLen
);
70 size_t GetIndexSize(int table_len
) {
71 size_t table_size
= sizeof(disk_cache::CacheAddr
) * table_len
;
72 return sizeof(disk_cache::IndexHeader
) + table_size
;
75 // ------------------------------------------------------------------------
// Sets the group for the current experiment. Returns false if the files
// should be discarded.
// NOTE(review): several interior lines (returns/closing braces) are elided in
// this excerpt; the token stream below is reproduced as-is.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  // Caches written by the retired OLD_FILE experiments are thrown away.
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.

  // Users in the "ExperimentControl" field-trial group are tagged as the
  // control group of the simple-cache experiment.
  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
  } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) {

  // Everyone else runs with no experiment.
  header->experiment = disk_cache::NO_EXPERIMENT;
102 // ------------------------------------------------------------------------
104 namespace disk_cache
{
// |path| is the cache directory and |cache_thread| runs the backend's
// background work (it seeds background_queue_). Most members are simply
// zero/default initialized here; real setup happens in SyncInit().
// NOTE(review): most of the initializer list is elided in this excerpt.
BackendImpl::BackendImpl(const base::FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      cache_type_(net::DISK_CACHE),
      new_eviction_(false),
// Performs the general initialization of this instance: opens and validates
// the backing store, starts the stats timer, selects the eviction algorithm
// and experiment group, and initializes the block files, eviction and
// rankings machinery. Returns a net error code (net::OK on success).
// NOTE(review): several interior lines (returns/closing braces) are elided in
// this excerpt; the token stream below is reproduced as-is.
int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  trace_object_ = TraceObject::GetTraceObject();
  // Create a recurrent timer of 30 secs (1 sec under unit tests).
  int timer_delay = unit_test_ ? 1000 : 30000;
  timer_.reset(new base::RepeatingTimer<BackendImpl>());
  timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                &BackendImpl::OnStatsTimer);

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);

    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;

  // Report creation when the index is fresh or empty.
  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  // The crash flag stays set while running; CleanupCache()/PrepareForRestart()
  // clear it on an orderly shutdown, so a nonzero value here means we crashed.
  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
  trace_object_->EnableTracing(true);

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);

  return disabled_ ? net::ERR_FAILED : net::OK;
// Resets per-run state so the object can be reinitialized: drops flags the
// user didn't explicitly request, clears the crash marker in the index, and
// closes the block files.
// NOTE(review): interior lines are elided in this excerpt.
void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  // Clear the "running" marker so the next SyncInit() doesn't see a crash.
  data_->header.crash = 0;
  block_files_.CloseFiles();
// Flushes or drops pending background work, then hands final cleanup to the
// background thread when it isn't the current one.
// NOTE(review): interior lines (closing braces/else) are elided in this
// excerpt.
BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    // Post the final cleanup to the background thread; the ScopedAllowWait
    // below permits blocking on it from this thread.
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
// Releases the cache's resources on shutdown: records an orderly exit in the
// index, settles pending file I/O, closes the block files and invalidates
// outstanding weak pointers.
// NOTE(review): interior lines are elided in this excerpt.
void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");

  // Mark an orderly shutdown so the next init doesn't report a crash.
  data_->header.crash = 0;

  if (user_flags_ & kNoRandom) {
    // This is a net_unittest, verify that we are not 'leaking' entries.
    File::WaitForPendingIO(&num_pending_io_);
    File::DropPendingIO();

  block_files_.CloseFiles();
  ptr_factory_.InvalidateWeakPtrs();
// Returns the full path of the external file backing |address|, or an empty
// path when the address is not an initialized separate-file address.
base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    return base::FilePath();

  // External files are named "f_XXXXXX", with the file number in hex.
  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
// NOTE(review): the |header| declaration and some guards are elided in this
// excerpt.
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  // Size the index table for the configured maximum cache size.
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))

  // Extending the file reserves (zero-filled) space for the hash table.
  return file->SetLength(GetIndexSize(header.table_len));
// Opens (creating if necessary) the index file and maps it into memory as
// |data_|. |file_created| receives whether a brand new platform file had to
// be created (via PLATFORM_FILE_OPEN_ALWAYS).
// NOTE(review): interior lines (returns/closing braces) are elided in this
// excerpt.
bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!file_util::CreateDirectory(path_))

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())

  // Fresh file: write the header and reserve the table space.
  ret = CreateBackingStore(file.get());

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
    LOG(ERROR) << "Unable to map Index file";

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
// Reports an error condition to the UMA histograms. |error| is zero or a
// (negative) disk_cache error code.
void BackendImpl::ReportError(int error) {
  // Only "no error", a previous crash or a newly created cache are expected
  // on this path; anything else indicates a caller bug.
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
// Sanity-checks the mapped index file — size, magic, version, table length
// and stored counters — before the backend starts using it, then primes
// mask_ and pre-reads the whole table.
// NOTE(review): interior lines (returns/closing braces) are elided in this
// excerpt.
bool BackendImpl::CheckIndex() {
  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";

  // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
  if (kIndexMagic != data_->header.magic ||
      kCurrentVersion >> 16 != data_->header.version >> 16) {
    LOG(ERROR) << "Invalid file version or magic";

  if (kCurrentVersion == data_->header.version) {
    // We need file version 2.1 for the new eviction algorithm.

  if (kIndexMagic != data_->header.magic ||
      kCurrentVersion != data_->header.version) {
    LOG(ERROR) << "Invalid file version or magic";

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";

  // The file must be large enough for the declared table, and the table
  // length must be a multiple of kBaseTableLen.
  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  // Allow up to kDefaultCacheSize of slack over the configured maximum before
  // declaring the stored byte count invalid; the kint32max guard keeps the
  // addition from overflowing.
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";

  // table_len is expected to be a power of two (see DesiredIndexTableLen),
  // so this mask maps a hash to a table bucket.
  mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
// Loads the usage statistics stored inside the cache, creating a fresh
// block-file record for them when none exists yet.
// NOTE(review): interior lines (returns/closing braces and one statement
// tail) are elided in this excerpt.
bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    // No stats stored yet: reserve a block for them and start empty.
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
    return stats_.Init(NULL, 0, address);

  // Stats must live in a block file.
  if (!address.is_block_file()) {

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +

  if (!file->Read(data.get(), size, offset))

  if (!stats_.Init(data.get(), size, address))
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
485 } // namespace disk_cache