// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/backend_worker_v3.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/file.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

#if defined(V3_NOT_JUST_YET_READY)

const char kIndexName[] = "index";

// Seems like ~240 MB correspond to less than 50k entries for 99% of the people.
// Note that the actual target is to keep the index table load factor under 55%
// for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}
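
// A worked example of the sizing above: every doubling of the storage target
// beyond k64kEntriesStore (240 MB) doubles the table, capped at
// kBaseTableLen * 16 = 1,048,576 buckets. Assuming CacheAddr is a 32-bit
// value, that largest table occupies about 4 MB, matching the comment above.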

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}
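
// MaxStorageSizeForTable() above is roughly the inverse mapping: it allows
// k64kEntriesStore / kBaseTableLen bytes (about 3.6 KB) of storage per table
// bucket.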

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}
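
// So the index file is simply an IndexHeader followed by table_len CacheAddr
// slots; CreateBackingStore() below uses this to size a freshly created file.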

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) {
      return false;
    }
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}
#endif  // defined(V3_NOT_JUST_YET_READY).

// ------------------------------------------------------------------------

namespace disk_cache {

BackendImplV3::Worker::Worker(
    const base::FilePath& path,
    const scoped_refptr<base::SingleThreadTaskRunner>& main_thread)
    : path_(path), block_files_(path), init_(false) {
}
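
// The constructor above only records the cache directory and sets up the
// block-files helper; init_ stays false until Init() runs, and Init() is still
// a stub (see the bottom of this file).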

#if defined(V3_NOT_JUST_YET_READY)

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  trace_object_ = TraceObject::GetTraceObject();
  // Create a recurrent timer of 30 secs.
  int timer_delay = unit_test_ ? 1000 : 30000;
  timer_.reset(new base::RepeatingTimer<BackendImpl>());
  timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                &BackendImpl::OnStatsTimer);
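  // The arithmetic behind kTrimDelay at the top of the file: 10 ticks of this
  // 30-second timer are the "first 5 minutes" during which trimming is
  // avoided.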

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;
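  // Crash detection: the flag raised here is cleared again by
  // PrepareForRestart() and CleanupCache(), so finding it already set means
  // the previous session never shut down cleanly; that case is reported below
  // as ERR_PREVIOUS_CRASH. (At one increment per second, even a 32-bit this_id
  // takes on the order of a century to wrap.)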

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  data_->header.crash = 0;
  block_files_.CloseFiles();
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
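    // ScopedAllowWait lifts the usual ThreadRestrictions ban on blocking waits
    // so this thread can block until the posted cleanup has finished.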
    done_.Wait();
  }
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  data_->header.crash = 0;

  if (user_flags_ & kNoRandom) {
    // This is a net_unittest, verify that we are not 'leaking' entries.
    File::WaitForPendingIO(&num_pending_io_);
  } else {
    File::DropPendingIO();
  }
  block_files_.CloseFiles();
  ptr_factory_.InvalidateWeakPtrs();
}

base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}
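
// Note that external entries map to files named "f_" plus the six-digit hex
// file number (for example "f_0000a1" for file 0xa1); addresses that live
// inside block files simply get an empty path from GetFileName() above.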

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}
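
// The version is packed with the major number in the high 16 bits and the
// minor in the low 16, so 0x20001 reads as version 2.1; CheckIndex() below
// compares version >> 16 when only the major number matters.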

bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!base::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
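  // OPEN_ALWAYS opens the index if it already exists and creates it otherwise,
  // reporting which case happened through |file_created|; EXCLUSIVE_WRITE is
  // presumably there to keep a second instance from writing to the same index.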
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file.get());

  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
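  // (The disk cache's ERR_* codes are zero or negative, so error * -1 below
  // yields a non-negative value for the histogram.)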
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

bool BackendImpl::CheckIndex() {
  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif
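
  // The size check above tolerates up to kDefaultCacheSize (80 MB) of drift
  // over max_size_ before declaring the header corrupt, and the kint32max
  // guard keeps max_size_ + kDefaultCacheSize itself from overflowing.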

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;
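  // Table lengths produced by DesiredIndexTableLen() are powers of two
  // (kBaseTableLen is 64K, multiplied by 1, 2, 4, 8 or 16), so table_len - 1
  // works as a bitmask, presumably applied to entry hashes when indexing the
  // table.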

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}
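
// InitStats() below either allocates a brand new block for the stats the
// first time the cache is created (no address recorded in the header yet), or
// reads the existing record from its block file and hands it to stats_.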

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

#endif  // defined(V3_NOT_JUST_YET_READY).
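
// Note: the real initialization code above is compiled out behind
// V3_NOT_JUST_YET_READY, so the V3 worker below is still a stub and Init()
// simply reports failure.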

int BackendImplV3::Worker::Init(const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

BackendImplV3::Worker::~Worker() {
}

}  // namespace disk_cache