// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/backend_worker_v3.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/file.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

#if defined(V3_NOT_JUST_YET_READY)

const char* kIndexName = "index";

// Seems like ~240 MB correspond to less than 50k entries for 99% of the people.
// Note that the actual target is to keep the index table load factor under 55%
// for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

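// Returns the hash table length to use for the given storage size: the table
// doubles from kBaseTableLen as the target cache size grows, up to 16x for
// the largest caches.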
int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

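// Inverse of the mapping above: the largest storage size that a table of
// |table_len| buckets is expected to serve.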
int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

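// Returns the size in bytes of an index file with the given table length:
// the header plus one CacheAddr per bucket.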
size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) {
      return false;
    }
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

#endif  // defined(V3_NOT_JUST_YET_READY).

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

BackendImplV3::Worker::Worker(const base::FilePath& path,
                              base::MessageLoopProxy* main_thread)
    : path_(path),
      block_files_(path),
      init_(false) {
}

#if defined(V3_NOT_JUST_YET_READY)

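// Performs the blocking part of the backend initialization: opens and checks
// the backing store, bumps the id used to tag dirty entries, and sets up the
// block files, eviction, stats and rankings helpers.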
int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  return disabled_ ? net::ERR_FAILED : net::OK;
}

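// Leaves the backend ready to be re-initialized after a failure: resets the
// flags derived from user_flags_, clears the crash marker and drops the index
// and block files.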
void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

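// Shuts the backend down: flushes or drops pending IO, clears the crash flag
// and closes the index and block files.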
void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  rankings_.Reset();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

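// Returns the full path of the external file ("f_xxxxxx") that |address|
// points to.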
base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

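// Creates the cache directory if needed and opens (or creates) the index
// file, mapping it into memory through |index_| and |data_|.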
bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!base::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }
  return true;
}

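// Records |error| with UMA, transmitting it as a positive number.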
void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

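// Performs basic sanity checks on the mapped index: magic number, file
// version, table length and the stored size/entry counters.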
bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}

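// Loads the Stats data from its block-file record, creating the record the
// first time the cache is used.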
bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

#endif  // defined(V3_NOT_JUST_YET_READY).

int BackendImplV3::Worker::Init(const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

BackendImplV3::Worker::~Worker() {
}

}  // namespace disk_cache