// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/backend_impl.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/stats_counters.h"
#include "base/rand_util.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/file.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/webfonts_histogram.h"
#include "net/disk_cache/cache_util.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ this

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char* kIndexName = "index";

// It seems that ~240 MB corresponds to fewer than 50k entries for 99% of
// users. Note that the actual target is to keep the index table load factor
// under 55% for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}
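
// For illustration: a cache of up to ~240 MB gets the base 64Ki-bucket table,
// and each doubling of the storage size doubles the table, topping out at
// 16 * 64Ki = 1Mi buckets. With 4-byte cache addresses that is 256 KB of
// table for the smallest caches and 4 MB for the largest, plus the header.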

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
      "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    }
    return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

// A callback to perform final cleanup on the background thread.
void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
  backend->CleanupCache();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

BackendImpl::BackendImpl(
    const base::FilePath& path,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::BackendImpl(
    const base::FilePath& path,
    uint32 mask,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

int BackendImpl::Init(const CompletionCallback& callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  bool should_create_timer = false;
  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    should_create_timer = true;
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
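  // (For reference, a 32-bit id incremented once per second would take about
  // 136 years to wrap, since 2^32 seconds is roughly 136 years.)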
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  if (!disabled_ && should_create_timer) {
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

// ------------------------------------------------------------------------

int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  void* iter = NULL;
  EntryImpl* next = OpenNextEntryImpl(&iter);
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(&iter);

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iter);
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    void* iter = NULL;
    EntryImpl* entry = OpenNextEntryImpl(&iter);
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iter);
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
  *next_entry = OpenNextEntryImpl(iter);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
  *prev_entry = OpenPrevEntryImpl(iter);
  return (*prev_entry) ? net::OK : net::ERR_FAILED;
}

void BackendImpl::SyncEndEnumeration(void* iter) {
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(iter));
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32 hash = base::Hash(key);
  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry) {
    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    }
    cache_entry->Release();
  }
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    cache_entry = NULL;
    web_fonts_histogram::RecordEvictedEntry(key);
  } else if (!cache_entry) {
    web_fonts_histogram::RecordCacheMiss(key);
  }

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  int64 use_hours = total_hours - no_use_hours;

  if (!cache_entry) {
    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  Trace("Open hash 0x%x end: 0x%x", hash,
        cache_entry->entry()->address().value());
  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
  stats_.OnEvent(Stats::OPEN_HIT);
  web_fonts_histogram::RecordCacheHit(cache_entry);
  SIMPLE_STATS_COUNTER("disk_cache.hit");
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or
    // just a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists. If there is a crash in this process, we may
  // end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still dirty.

  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and clean up (a) and (b).

  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  FlushIndex();
  cache_entry->AddRef();
  return cache_entry.get();
}

EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
  return OpenFollowingEntry(true, iter);
}

EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
  return OpenFollowingEntry(false, iter);
}

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

void BackendImpl::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
  return background_queue_.GetWeakPtr();
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    base::FilePath name = GetFileName(file_address);
    int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
                base::File::FLAG_CREATE | base::File::FLAG_EXCLUSIVE_WRITE;
    base::File file(name, flags);
    if (!file.IsValid()) {
      base::File::Error error = file.error_details();
      if (error != base::File::FILE_ERROR_EXISTS) {
        LOG(ERROR) << "Unable to create file: " << error;
        return false;
      }
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}
719 if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
720 return;
721 eviction_.UpdateRank(entry, modified);
724 void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
725 Addr address(rankings->Data()->contents);
726 EntryImpl* cache_entry = NULL;
727 if (NewEntry(address, &cache_entry)) {
728 STRESS_NOTREACHED();
729 return;
732 uint32 hash = cache_entry->GetHash();
733 cache_entry->Release();
735 // Anything on the table means that this entry is there.
736 if (data_->table[hash & mask_])
737 return;
739 data_->table[hash & mask_] = address.value();
740 FlushIndex();
743 void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
744 uint32 hash = entry->GetHash();
745 std::string key = entry->GetKey();
746 Addr entry_addr = entry->entry()->address();
747 bool error;
748 EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
749 CacheAddr child(entry->GetNextAddress());
751 Trace("Doom entry 0x%p", entry);
753 if (!entry->doomed()) {
754 // We may have doomed this entry from within MatchEntry.
755 eviction_.OnDoomEntry(entry);
756 entry->InternalDoom();
757 if (!new_eviction_) {
758 DecreaseNumEntries();
760 stats_.OnEvent(Stats::DOOM_ENTRY);
763 if (parent_entry) {
764 parent_entry->SetNextAddress(Addr(child));
765 parent_entry->Release();
766 } else if (!error) {
767 data_->table[hash & mask_] = child;
770 FlushIndex();

#if defined(NET_BUILD_STRESS_CACHE)

CacheAddr BackendImpl::GetNextAddr(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    EntryImpl* this_entry = it->second;
    return this_entry->GetNextAddress();
  }
  DCHECK(block_files_.IsValid(address));
  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);

  CacheEntryBlock entry(File(address), address);
  CHECK(entry.Load());
  return entry.Data()->next;
}

void BackendImpl::NotLinked(EntryImpl* entry) {
  Addr entry_addr = entry->entry()->address();
  uint32 i = entry->GetHash() & mask_;
  Addr address(data_->table[i]);
  if (!address.is_initialized())
    return;

  for (;;) {
    DCHECK(entry_addr.value() != address.value());
    address.set_value(GetNextAddr(address));
    if (!address.is_initialized())
      break;
  }
}

#endif  // NET_BUILD_STRESS_CACHE

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
#if defined(NET_BUILD_STRESS_CACHE)
  NotLinked(entry);
#endif
  if (!new_eviction_)
    return;

  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int32 BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}

int BackendImpl::MaxFileSize() const {
  return cache_type() == net::PNACL_CACHE ? max_size_ : max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}

std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }

  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  STRESS_NOTREACHED();
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  if (disabled_)
    return;

  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
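  // (In effect the stored value moves 1/50th of the way towards the live
  // num_refs_ on each tick, or by 1 when the gap is under 50; e.g. a stored
  // value of 0 with num_refs_ == 100 becomes 2 on the next tick.)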
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}

void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
  background_queue_.FlushQueue(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::RunTaskForTest(const base::Closure& task,
                                const CompletionCallback& callback) {
  background_queue_.RunTask(task, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

base::RepeatingTimer<BackendImpl>* BackendImpl::GetTimerForTest() {
  return timer_.get();
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

void BackendImpl::FlushIndex() {
  if (index_.get() && !disabled_)
    index_->Flush();
}

// ------------------------------------------------------------------------

net::CacheType BackendImpl::GetCacheType() const {
  return cache_type_;
}

int32 BackendImpl::GetEntryCount() const {
  if (!index_.get() || disabled_)
    return 0;
  // num_entries includes entries already evicted.
  int32 not_deleted = data_->header.num_entries -
                      data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    NOTREACHED();
    not_deleted = 0;
  }

  return not_deleted;
}

int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.CreateEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntry(const std::string& key,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntry(key, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomAllEntries(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                    const base::Time end_time,
                                    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesSince(initial_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenNextEntry(iter, next_entry, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::EndEnumeration(void** iter) {
  background_queue_.EndEnumeration(*iter);
  *iter = NULL;
}

void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = base::StringPrintf("%d", data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = base::StringPrintf("%d", num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = base::StringPrintf("%d", max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = base::StringPrintf("%d", data_->header.num_bytes);
  stats->push_back(item);

  item.first = "Cache type";
  item.second = "Blockfile Cache";
  stats->push_back(item);

  stats_.GetItems(stats);
}

void BackendImpl::OnExternalCacheHit(const std::string& key) {
  background_queue_.OnExternalCacheHit(key);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!base::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
              base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_EXCLUSIVE_WRITE;
  base::File base_file(index_name, flags);
  if (!base_file.IsValid())
    return false;

  bool ret = true;
  *file_created = base_file.created();

  scoped_refptr<disk_cache::File> file(new disk_cache::File(base_file.Pass()));
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = static_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferredCacheSize(available);

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;

    data_->header.stats = address.value();
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

void BackendImpl::StoreStats() {
  int size = stats_.StorageSize();
  scoped_ptr<char[]> data(new char[size]);
  Addr address;
  size = stats_.SerializeStats(data.get(), size, &address);
  DCHECK(size);
  if (!address.is_initialized())
    return;

  MappedFile* file = File(address);
  if (!file)
    return;

  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  file->Write(data.get(), size, offset);  // ignore result.
}

void BackendImpl::RestartCache(bool failure) {
  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
  int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
  int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);

  PrepareForRestart();
  if (failure) {
    DCHECK(!num_refs_);
    DCHECK(!open_entries_.size());
    DelayedCacheCleanup(path_);
  } else {
    DeleteCache(path_, false);
  }

  // Don't call Init() if directed by the unit test: we are simulating a failure
  // trying to re-enable the cache.
  if (unit_test_)
    init_ = true;  // Let the destructor do proper cleanup.
  else if (SyncInit() == net::OK) {
    stats_.SetCounter(Stats::FATAL_ERROR, errors);
    stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
    stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
    stats_.SetCounter(Stats::LAST_REPORT, last_report);
  }
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
    EntryImpl* this_entry = it->second;
    this_entry->AddRef();
    *entry = this_entry;
    return 0;
  }

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV2()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, address, read_only_));
  IncreaseNumRefs();
  *entry = NULL;

  TimeTicks start = TimeTicks::Now();
  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (IsLoaded()) {
    CACHE_UMA(AGE_MS, "LoadTime", 0, start);
  }

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
      Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    // Don't remove this from the list (it is not linked properly). Instead,
    // break the link back to the entry because it is going away, and leave the
    // rankings node to be deleted if we find it through a list.
    rankings_.SetContents(cache_entry->rankings(), 0);
  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    rankings_.SetContents(cache_entry->rankings(), address.value());
  }

  if (!cache_entry->DataSanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->SetDirtyFlag(0);
    cache_entry->FixForDelete();
  }

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->SetDirtyFlag(GetCurrentEntryId());

  if (cache_entry->dirty()) {
    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
          address.value());
  }

  open_entries_[address.value()] = cache_entry.get();

  cache_entry->BeginLogging(net_log_, false);
  cache_entry.swap(entry);
  return 0;
}

EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
                                   bool find_parent, Addr entry_addr,
                                   bool* match_error) {
  Addr address(data_->table[hash & mask_]);
  scoped_refptr<EntryImpl> cache_entry, parent_entry;
  EntryImpl* tmp = NULL;
  bool found = false;
  std::set<CacheAddr> visited;
  *match_error = false;

  for (;;) {
    if (disabled_)
      break;

    if (visited.find(address.value()) != visited.end()) {
      // It's possible for a buggy version of the code to write a loop. Just
      // break it.
      Trace("Hash collision loop 0x%x", address.value());
      address.set_value(0);
      parent_entry->SetNextAddress(address);
    }
    visited.insert(address.value());

    if (!address.is_initialized()) {
      if (find_parent)
        found = true;
      break;
    }

    int error = NewEntry(address, &tmp);
    cache_entry.swap(&tmp);

    if (error || cache_entry->dirty()) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      if (parent_entry.get()) {
        parent_entry->SetNextAddress(child);
        parent_entry = NULL;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
            address.value());

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(cache_entry.get());
        cache_entry = NULL;
      } else {
        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      visited.clear();
      continue;
    }

    DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
    if (cache_entry->IsSameEntry(key, hash)) {
      if (!cache_entry->Update())
        cache_entry = NULL;
      found = true;
      if (find_parent && entry_addr.value() != address.value()) {
        Trace("Entry not on the index 0x%x", address.value());
        *match_error = true;
        parent_entry = NULL;
      }
      break;
    }
    if (!cache_entry->Update())
      cache_entry = NULL;
    parent_entry = cache_entry;
    cache_entry = NULL;
    if (!parent_entry.get())
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  if (parent_entry.get() && (!find_parent || !found))
    parent_entry = NULL;

  if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
    *match_error = true;
    parent_entry = NULL;
  }

  if (cache_entry.get() && (find_parent || !found))
    cache_entry = NULL;

  find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
  FlushIndex();
  return tmp;
}

// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
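// With the new eviction algorithm, live entries are spread over three ranking
// lists (the "no use", "low use" and "high use" lists referenced by the
// histograms above), so the iterator keeps one candidate per list and each
// step returns the most recently used of the three candidates (or the least
// recently used, when iterating backwards).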
EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
  if (disabled_)
    return NULL;

  DCHECK(iter);

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    iterator.reset(new Rankings::Iterator(&rankings_));
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &temp);
      entries[i].swap(&temp);  // The entry was already addref'd.
    }
    if (!ret)
      return NULL;
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      if (iterator->list == i) {
        OpenFollowingEntryFromList(forward, iterator->list,
                                   &iterator->nodes[i], &temp);
      } else {
        temp = GetEnumeratedEntry(iterator->nodes[i],
                                  static_cast<Rankings::List>(i));
      }

      entries[i].swap(&temp);  // The entry was already addref'd.
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK_LT(oldest, 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0)
    return NULL;

  EntryImpl* next_entry;
  if (forward) {
    next_entry = entries[newest].get();
    iterator->list = static_cast<Rankings::List>(newest);
  } else {
    next_entry = entries[oldest].get();
    iterator->list = static_cast<Rankings::List>(oldest);
  }

  *iter = iterator.release();
  next_entry->AddRef();
  return next_entry;
}

bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
                                             CacheRankingsBlock** from_entry,
                                             EntryImpl** next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = forward ?
      rankings_.GetNext(rankings.get(), list) :
      rankings_.GetPrev(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = NULL;

  *next_entry = GetEnumeratedEntry(next.get(), list);
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}

EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
                                           Rankings::List list) {
  if (!next || disabled_)
    return NULL;

  EntryImpl* entry;
  int rv = NewEntry(Addr(next->Data()->contents), &entry);
  if (rv) {
    STRESS_NOTREACHED();
    rankings_.Remove(next, list, false);
    if (rv == ERR_INVALID_ADDRESS) {
      // There is nothing linked from the index. Delete the rankings node.
      DeleteBlock(next->address(), true);
    }
    return NULL;
  }

  if (entry->dirty()) {
    // We cannot trust this entry.
    InternalDoomEntry(entry);
    entry->Release();
    return NULL;
  }

  if (!entry->Update()) {
    STRESS_NOTREACHED();
    entry->Release();
    return NULL;
  }

  // Note that it is unfortunate (but possible) for this entry to be clean, but
  // not actually the real entry. In other words, we could have lost this entry
  // from the index, and it could have been replaced with a newer one. It's not
  // worth checking that this entry is "the real one", so we just return it and
  // let the enumeration continue; this entry will be evicted at some point, and
  // the regular path will work with the real entry. With time, this problem
  // will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  int hit_ratio_as_percentage = stats_.GetHitRatio();

  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
  // ratio of that bin's total count to the count in the same bin in the Size2
  // histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
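  // (Illustrative numbers: a cache with a 40% hit ratio reports to
  // HitRatioBySize2 on roughly 40 of every 100 timer reports, so dividing a
  // bin's HitRatioBySize2 count by its Size2 count recovers ~0.40.)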
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int age = (Time::Now() -
             Time::FromInternalValue(data_->header.create_time)).InHours();
  if (age)
    CACHE_UMA(HOURS, "FilesAge", 0, age);

  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
  // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
  // time is the ratio of that bin's total count to the count in the same bin in
  // the TotalTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK(0x20000 == data_->header.version);
  data_->header.version = 0x20001;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}
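
// The on-disk version is packed as (major << 16) | minor, so 2.0 is 0x20000
// and 2.1 is 0x20001. When the new eviction algorithm is enabled, CheckIndex()
// below only requires the high 16 bits (the major version) to match, which is
// what allows a 2.0 file to be upgraded in place instead of rebuilt.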

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory.
  return index_->Preload();
}

int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
}

int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory.
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;
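
    // For illustration: with 4 GB of RAM, 2% is ~82 MB, so the 30 MB cap
    // applies; with 1 GB of RAM, 2% is ~20 MB and is used unchanged.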

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache