// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/backend_impl.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/hash.h"
#include "base/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/stats_counters.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/time.h"
#include "base/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_format.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
#include "net/disk_cache/experiments.h"
#include "net/disk_cache/file.h"

// This has to be defined before including histogram_macros.h from this file.
#define NET_DISK_CACHE_BACKEND_IMPL_CC_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char* kIndexName = "index";

// About 240 MB of storage corresponds to fewer than 50k entries for 99% of
// users. Note that the actual target is to keep the index table load factor
// under 55% for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

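// For example, a 500 MB cache falls between 2x and 4x k64kEntriesStore
// (480 MB and 960 MB), so it gets a table of kBaseTableLen * 4 = 256k buckets.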
int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

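// The index file holds an IndexHeader followed by the bucket array; assuming
// 4-byte cache addresses (CacheAddr), the base 64k table alone takes 256 KB.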
size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) {
      return false;
    }
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

// A callback to perform final cleanup on the background thread.
void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
  backend->CleanupCache();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

// Returns the preferred maximum number of bytes for the cache given the
// number of available bytes.
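// For example, with 1 GB available, the 200 MB target (2.5 * kDefaultCacheSize)
// exceeds 10% of the space, so the function returns available / 10 (roughly
// 100 MB).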
int PreferedCacheSize(int64 available) {
  // Return 80% of the available space if there is not enough space to use
  // kDefaultCacheSize.
  if (available < kDefaultCacheSize * 10 / 8)
    return static_cast<int32>(available * 8 / 10);

  // Return kDefaultCacheSize if it uses 80% to 10% of the available space.
  if (available < kDefaultCacheSize * 10)
    return kDefaultCacheSize;

  // Return 10% of the available space if the target size
  // (2.5 * kDefaultCacheSize) is more than 10%.
  if (available < static_cast<int64>(kDefaultCacheSize) * 25)
    return static_cast<int32>(available / 10);

  // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1%
  // of the available space.
  if (available < static_cast<int64>(kDefaultCacheSize) * 250)
    return kDefaultCacheSize * 5 / 2;

  // Return 1% of the available space if it does not exceed kint32max.
  if (available < static_cast<int64>(kint32max) * 100)
    return static_cast<int32>(available / 100);

  return kint32max;
}

// ------------------------------------------------------------------------

BackendImpl::BackendImpl(const base::FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::BackendImpl(const base::FilePath& path,
                         uint32 mask,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

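// Like the rest of the public Backend interface, Init() only posts the real
// work to the background (cache) thread; completion is reported through
// |callback| and the immediate return value is net::ERR_IO_PENDING.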
int BackendImpl::Init(const CompletionCallback& callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurring timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

// ------------------------------------------------------------------------

int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  void* iter = NULL;
  EntryImpl* next = OpenNextEntryImpl(&iter);
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(&iter);

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iter);
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    void* iter = NULL;
    EntryImpl* entry = OpenNextEntryImpl(&iter);
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iter);
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
  *next_entry = OpenNextEntryImpl(iter);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
  *prev_entry = OpenPrevEntryImpl(iter);
  return (*prev_entry) ? net::OK : net::ERR_FAILED;
}

void BackendImpl::SyncEndEnumeration(void* iter) {
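  // Taking ownership of the raw pointer deletes the Rankings::Iterator as
  // soon as this scoped_ptr goes out of scope; that is the whole point here.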
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(iter));
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32 hash = base::Hash(key);
  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry) {
    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    }
    cache_entry->Release();
  }
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    cache_entry = NULL;
  }

  int current_size = data_->header.num_bytes / (1024 * 1024);
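  // The stats timer fires every 30 seconds, so 120 TIMER ticks correspond to
  // one hour of cache uptime.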
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  int64 use_hours = total_hours - no_use_hours;

  if (!cache_entry) {
    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  Trace("Open hash 0x%x end: 0x%x", hash,
        cache_entry->entry()->address().value());
  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
  stats_.OnEvent(Stats::OPEN_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.hit");
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or
    // just a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists. If there is a crash in this process, we may
  // end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still dirty.
  //
  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and clean up (a) and (b).

  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  FlushIndex();
  cache_entry->AddRef();
  return cache_entry.get();
}

EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
  return OpenFollowingEntry(true, iter);
}

EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
  return OpenFollowingEntry(false, iter);
}

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
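  // (This caps max_bytes just below kint32max minus 10%, i.e. roughly 1.93
  // billion bytes.)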
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

void BackendImpl::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
  return background_queue_.GetWeakPtr();
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    base::FilePath name = GetFileName(file_address);
    int flags = base::PLATFORM_FILE_READ |
                base::PLATFORM_FILE_WRITE |
                base::PLATFORM_FILE_CREATE |
                base::PLATFORM_FILE_EXCLUSIVE_WRITE;
    base::PlatformFileError error;
    scoped_refptr<disk_cache::File> file(new disk_cache::File(
        base::CreatePlatformFile(name, flags, NULL, &error)));
    if (!file->IsValid()) {
      if (error != base::PLATFORM_FILE_ERROR_EXISTS) {
        LOG(ERROR) << "Unable to create file: " << error;
        return false;
      }
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    return;
  eviction_.UpdateRank(entry, modified);
}

void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  if (NewEntry(address, &cache_entry)) {
    STRESS_NOTREACHED();
    return;
  }

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
  FlushIndex();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }

  FlushIndex();
}

#if defined(NET_BUILD_STRESS_CACHE)

CacheAddr BackendImpl::GetNextAddr(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    EntryImpl* this_entry = it->second;
    return this_entry->GetNextAddress();
  }
  DCHECK(block_files_.IsValid(address));
  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);

  CacheEntryBlock entry(File(address), address);
  CHECK(entry.Load());
  return entry.Data()->next;
}

void BackendImpl::NotLinked(EntryImpl* entry) {
  Addr entry_addr = entry->entry()->address();
  uint32 i = entry->GetHash() & mask_;
  Addr address(data_->table[i]);
  if (!address.is_initialized())
    return;

  for (;;) {
    DCHECK(entry_addr.value() != address.value());
    address.set_value(GetNextAddr(address));
    if (!address.is_initialized())
      break;
  }
}
#endif  // NET_BUILD_STRESS_CACHE

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
#if defined(NET_BUILD_STRESS_CACHE)
  NotLinked(entry);
#endif
  if (!new_eviction_)
    return;

  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int32 BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}

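// A single cache entry is not allowed to grow beyond one eighth of the total
// cache size.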
int BackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}

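// Builds histogram names of the form "DiskCache.<cache type>.<name>", with an
// "_<experiment>" suffix when an experiment id is given.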
std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
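// The decision is computed once and then cached in uma_report_: 1 means we
// already checked and are not reporting, 2 means reports are due.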
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }

  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  STRESS_NOTREACHED();
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
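  // Each tick moves the stored average 1/50th (2%) of the gap toward the real
  // num_refs_, with a minimum step of 1.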
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}

void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
  background_queue_.FlushQueue(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::RunTaskForTest(const base::Closure& task,
                                const CompletionCallback& callback) {
  background_queue_.RunTask(task, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

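// Returns the number of dirty entries if everything is consistent, or a
// negative error code from net/disk_cache/errors.h otherwise.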
int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

void BackendImpl::FlushIndex() {
  if (index_.get() && !disabled_)
    index_->Flush();
}

// ------------------------------------------------------------------------

net::CacheType BackendImpl::GetCacheType() const {
  return cache_type_;
}

int32 BackendImpl::GetEntryCount() const {
  if (!index_.get() || disabled_)
    return 0;
  // num_entries includes entries already evicted.
  int32 not_deleted = data_->header.num_entries -
                      data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    NOTREACHED();
    not_deleted = 0;
  }

  return not_deleted;
}

int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.CreateEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntry(const std::string& key,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntry(key, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomAllEntries(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                    const base::Time end_time,
                                    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesSince(initial_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenNextEntry(iter, next_entry, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::EndEnumeration(void** iter) {
  background_queue_.EndEnumeration(*iter);
  *iter = NULL;
}

void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = base::StringPrintf("%d", data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = base::StringPrintf("%d", num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = base::StringPrintf("%d", max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = base::StringPrintf("%d", data_->header.num_bytes);
  stats->push_back(item);

  item.first = "Cache type";
  item.second = "Blockfile Cache";
  stats->push_back(item);

  stats_.GetItems(stats);
}

void BackendImpl::OnExternalCacheHit(const std::string& key) {
  background_queue_.OnExternalCacheHit(key);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

bool BackendImpl::InitBackingStore(bool* file_created) {
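  // If the cache directory cannot be created there is nothing else to try;
  // SyncInit() surfaces this failure to the caller as ERR_STORAGE_ERROR.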
  if (!file_util::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
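// For example, with 10 GB of free disk and no preexisting index,
// PreferedCacheSize() picks the 200 MB target, which is already below the
// temporary 320 MB (4 x default) cap applied below.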
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferedCacheSize(available);

  // Let's not use more than the default size while we tune-up the performance
  // of bigger caches. TODO(rvargas): remove this limit.
  if (max_size_ > kDefaultCacheSize * 4)
    max_size_ = kDefaultCacheSize * 4;

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

void BackendImpl::StoreStats() {
  int size = stats_.StorageSize();
  scoped_ptr<char[]> data(new char[size]);
  Addr address;
  size = stats_.SerializeStats(data.get(), size, &address);
  DCHECK(size);
  if (!address.is_initialized())
    return;

  MappedFile* file = File(address);
  if (!file)
    return;

  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  file->Write(data.get(), size, offset);  // ignore result.
}

void BackendImpl::RestartCache(bool failure) {
  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
  int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
  int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);

  PrepareForRestart();
  if (failure) {
    DCHECK(!num_refs_);
    DCHECK(!open_entries_.size());
    DelayedCacheCleanup(path_);
  } else {
    DeleteCache(path_, false);
  }

  // Don't call Init() if directed by the unit test: we are simulating a failure
  // trying to re-enable the cache.
  if (unit_test_)
    init_ = true;  // Let the destructor do proper cleanup.
  else if (SyncInit() == net::OK) {
    stats_.SetCounter(Stats::FATAL_ERROR, errors);
    stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
    stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
    stats_.SetCounter(Stats::LAST_REPORT, last_report);
  }
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
    EntryImpl* this_entry = it->second;
    this_entry->AddRef();
    *entry = this_entry;
    return 0;
  }

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV2()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, address, read_only_));
  IncreaseNumRefs();
  *entry = NULL;

  TimeTicks start = TimeTicks::Now();
  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (IsLoaded()) {
    CACHE_UMA(AGE_MS, "LoadTime", 0, start);
  }

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
      Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    // Don't remove this from the list (it is not linked properly). Instead,
    // break the link back to the entry because it is going away, and leave the
    // rankings node to be deleted if we find it through a list.
    rankings_.SetContents(cache_entry->rankings(), 0);
  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    rankings_.SetContents(cache_entry->rankings(), address.value());
  }

  if (!cache_entry->DataSanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->SetDirtyFlag(0);
    cache_entry->FixForDelete();
  }

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->SetDirtyFlag(GetCurrentEntryId());

  if (cache_entry->dirty()) {
    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
          address.value());
  }

  open_entries_[address.value()] = cache_entry.get();

  cache_entry->BeginLogging(net_log_, false);
  cache_entry.swap(entry);
  return 0;
}

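// Walks the collision chain that starts at the index bucket for |hash|.
// With |find_parent| false it returns the entry that matches |key|; with
// |find_parent| true it returns the previous entry in the chain (the one
// whose next pointer must be updated), or the last entry if the key is not
// present. Dirty or unreadable entries found along the way are unlinked and
// destroyed, and the walk restarts from the table.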
EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
                                   bool find_parent, Addr entry_addr,
                                   bool* match_error) {
  Addr address(data_->table[hash & mask_]);
  scoped_refptr<EntryImpl> cache_entry, parent_entry;
  EntryImpl* tmp = NULL;
  bool found = false;
  std::set<CacheAddr> visited;
  *match_error = false;

  for (;;) {
    if (disabled_)
      break;

    if (visited.find(address.value()) != visited.end()) {
      // It's possible for a buggy version of the code to write a loop. Just
      // break it.
      Trace("Hash collision loop 0x%x", address.value());
      address.set_value(0);
      parent_entry->SetNextAddress(address);
    }
    visited.insert(address.value());

    if (!address.is_initialized()) {
      if (find_parent)
        found = true;
      break;
    }

    int error = NewEntry(address, &tmp);
    cache_entry.swap(&tmp);

    if (error || cache_entry->dirty()) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      if (parent_entry.get()) {
        parent_entry->SetNextAddress(child);
        parent_entry = NULL;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
            address.value());

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(cache_entry.get());
        cache_entry = NULL;
      } else {
        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      visited.clear();
      continue;
    }

    DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
    if (cache_entry->IsSameEntry(key, hash)) {
      if (!cache_entry->Update())
        cache_entry = NULL;
      found = true;
      if (find_parent && entry_addr.value() != address.value()) {
        Trace("Entry not on the index 0x%x", address.value());
        *match_error = true;
        parent_entry = NULL;
      }
      break;
    }
    if (!cache_entry->Update())
      cache_entry = NULL;
    parent_entry = cache_entry;
    cache_entry = NULL;
    if (!parent_entry.get())
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  if (parent_entry.get() && (!find_parent || !found))
    parent_entry = NULL;

  if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
    *match_error = true;
    parent_entry = NULL;
  }

  if (cache_entry.get() && (find_parent || !found))
    cache_entry = NULL;

  find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
  FlushIndex();
  return tmp;
}

// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
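// With the new eviction there are three ranking lists (no use, low use and
// high use), so the iterator tracks one candidate node per list and, on each
// step, returns the newest (forward) or oldest (backward) of the candidates.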
EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
  if (disabled_)
    return NULL;

  DCHECK(iter);

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    iterator.reset(new Rankings::Iterator(&rankings_));
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &temp);
      entries[i].swap(&temp);  // The entry was already addref'd.
    }
    if (!ret)
      return NULL;
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      if (iterator->list == i) {
        OpenFollowingEntryFromList(forward, iterator->list,
                                   &iterator->nodes[i], &temp);
      } else {
        temp = GetEnumeratedEntry(iterator->nodes[i],
                                  static_cast<Rankings::List>(i));
      }

      entries[i].swap(&temp);  // The entry was already addref'd.
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK_LT(oldest, 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0)
    return NULL;

  EntryImpl* next_entry;
  if (forward) {
    next_entry = entries[newest].get();
    iterator->list = static_cast<Rankings::List>(newest);
  } else {
    next_entry = entries[oldest].get();
    iterator->list = static_cast<Rankings::List>(oldest);
  }

  *iter = iterator.release();
  next_entry->AddRef();
  return next_entry;
}

bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
                                             CacheRankingsBlock** from_entry,
                                             EntryImpl** next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = forward ?
      rankings_.GetNext(rankings.get(), list) :
      rankings_.GetPrev(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = NULL;

  *next_entry = GetEnumeratedEntry(next.get(), list);
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}

EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
                                           Rankings::List list) {
  if (!next || disabled_)
    return NULL;

  EntryImpl* entry;
  int rv = NewEntry(Addr(next->Data()->contents), &entry);
  if (rv) {
    STRESS_NOTREACHED();
    rankings_.Remove(next, list, false);
    if (rv == ERR_INVALID_ADDRESS) {
      // There is nothing linked from the index. Delete the rankings node.
      DeleteBlock(next->address(), true);
    }
    return NULL;
  }

  if (entry->dirty()) {
    // We cannot trust this entry.
    InternalDoomEntry(entry);
    entry->Release();
    return NULL;
  }

  if (!entry->Update()) {
    STRESS_NOTREACHED();
    entry->Release();
    return NULL;
  }

  // Note that it is unfortunate (but possible) for this entry to be clean, but
  // not actually the real entry. In other words, we could have lost this entry
  // from the index, and it could have been replaced with a newer one. It's not
  // worth checking that this entry is "the real one", so we just return it and
  // let the enumeration continue; this entry will be evicted at some point, and
  // the regular path will work with the real entry. With time, this problem
  // will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  int hit_ratio_as_percentage = stats_.GetHitRatio();

  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
  // ratio of that bin's total count to the count in the same bin in the Size2
  // histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up-to-date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
  // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
  // time is the ratio of that bin's total count to the count in the same bin in
  // the TotalTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK(0x20000 == data_->header.version);
  data_->header.version = 0x20001;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

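  // The table only grows by whole multiples of kBaseTableLen (64k), so any
  // valid length has the low 16 bits clear; the mask below catches corrupt
  // values.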
  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}

int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
}

int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory.
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache