net/disk_cache/backend_impl.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/backend_impl.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/stats_counters.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_format.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
#include "net/disk_cache/experiments.h"
#include "net/disk_cache/file.h"

// This has to be defined before including histogram_macros.h from this file.
#define NET_DISK_CACHE_BACKEND_IMPL_CC_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char* kIndexName = "index";

// Seems like ~240 MB correspond to less than 50k entries for 99% of the
// people. Note that the actual target is to keep the index table load factor
// under 55% for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;
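
// The table doubles for every additional ~240 MB bucket of storage, up to
// 16x the base length. For example, a 500 MB cache falls in the
// (480 MB, 960 MB] bucket below, so it gets a table of 64k * 4 = 256k slots.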
int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}
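
// Returns the number of bytes the on-disk index uses: the header plus one
// 32-bit CacheAddr per table slot, so the base 64k-entry table takes about
// 256 KB.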
size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    }
    return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

// A callback to perform final cleanup on the background thread.
void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
  backend->CleanupCache();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

// Returns the preferred maximum number of bytes for the cache given the
// number of available bytes.
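// For example, with 1 GB of available space neither of the first two cases
// below applies, so the preferred size is 10% of the space (about 100 MB).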
int PreferedCacheSize(int64 available) {
  // Return 80% of the available space if there is not enough space to use
  // kDefaultCacheSize.
  if (available < kDefaultCacheSize * 10 / 8)
    return static_cast<int32>(available * 8 / 10);

  // Return kDefaultCacheSize if it uses 80% to 10% of the available space.
  if (available < kDefaultCacheSize * 10)
    return kDefaultCacheSize;

  // Return 10% of the available space if the target size
  // (2.5 * kDefaultCacheSize) is more than 10%.
  if (available < static_cast<int64>(kDefaultCacheSize) * 25)
    return static_cast<int32>(available / 10);

  // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1%
  // of the available space.
  if (available < static_cast<int64>(kDefaultCacheSize) * 250)
    return kDefaultCacheSize * 5 / 2;

  // Return 1% of the available space if it does not exceed kint32max.
  if (available < static_cast<int64>(kint32max) * 100)
    return static_cast<int32>(available / 100);

  return kint32max;
}

// ------------------------------------------------------------------------

BackendImpl::BackendImpl(const base::FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::BackendImpl(const base::FilePath& path,
                         uint32 mask,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      disabled_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

int BackendImpl::Init(const CompletionCallback& callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be
  // enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

// ------------------------------------------------------------------------

int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  void* iter = NULL;
  EntryImpl* next = OpenNextEntryImpl(&iter);
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(&iter);

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iter);
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    void* iter = NULL;
    EntryImpl* entry = OpenNextEntryImpl(&iter);
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iter);
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
  *next_entry = OpenNextEntryImpl(iter);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
  *prev_entry = OpenPrevEntryImpl(iter);
  return (*prev_entry) ? net::OK : net::ERR_FAILED;
}

void BackendImpl::SyncEndEnumeration(void* iter) {
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(iter));
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32 hash = base::Hash(key);
  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry) {
    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    }
    cache_entry->Release();
  }
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    cache_entry = NULL;
  }

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  int64 use_hours = total_hours - no_use_hours;

  if (!cache_entry) {
    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  Trace("Open hash 0x%x end: 0x%x", hash,
        cache_entry->entry()->address().value());
  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
  stats_.OnEvent(Stats::OPEN_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.hit");
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or
    // just a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists. If there is a crash in this process, we may
  // end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still dirty.
  //
  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and cleanup (a) and (b).
  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  FlushIndex();
  cache_entry->AddRef();
  return cache_entry.get();
}

EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
  return OpenFollowingEntry(true, iter);
}

EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
  return OpenFollowingEntry(false, iter);
}

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

void BackendImpl::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
  return background_queue_.GetWeakPtr();
}
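
// Creates the next external file (f_xxxxxx) used to store data that does not
// fit in the block files. File numbers may wrap around, so the loop below
// skips names that already exist on disk.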
bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    base::FilePath name = GetFileName(file_address);
    int flags = base::PLATFORM_FILE_READ |
                base::PLATFORM_FILE_WRITE |
                base::PLATFORM_FILE_CREATE |
                base::PLATFORM_FILE_EXCLUSIVE_WRITE;
    base::PlatformFileError error;
    scoped_refptr<disk_cache::File> file(new disk_cache::File(
        base::CreatePlatformFile(name, flags, NULL, &error)));
    if (!file->IsValid()) {
      if (error != base::PLATFORM_FILE_ERROR_EXISTS) {
        LOG(ERROR) << "Unable to create file: " << error;
        return false;
      }
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    return;
  eviction_.UpdateRank(entry, modified);
}
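
// Re-attaches to the index an entry that was found through the rankings
// lists but is missing from the hash table (for example, after a crash).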
void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  if (NewEntry(address, &cache_entry)) {
    STRESS_NOTREACHED();
    return;
  }

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
  FlushIndex();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }

  FlushIndex();
}

#if defined(NET_BUILD_STRESS_CACHE)

CacheAddr BackendImpl::GetNextAddr(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    EntryImpl* this_entry = it->second;
    return this_entry->GetNextAddress();
  }
  DCHECK(block_files_.IsValid(address));
  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);

  CacheEntryBlock entry(File(address), address);
  CHECK(entry.Load());
  return entry.Data()->next;
}

void BackendImpl::NotLinked(EntryImpl* entry) {
  Addr entry_addr = entry->entry()->address();
  uint32 i = entry->GetHash() & mask_;
  Addr address(data_->table[i]);
  if (!address.is_initialized())
    return;

  for (;;) {
    DCHECK(entry_addr.value() != address.value());
    address.set_value(GetNextAddr(address));
    if (!address.is_initialized())
      break;
  }
}
#endif  // NET_BUILD_STRESS_CACHE

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
#if defined(NET_BUILD_STRESS_CACHE)
  NotLinked(entry);
#endif
  if (!new_eviction_)
    return;

  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

int32 BackendImpl::GetCurrentEntryId() const {
  return data_->header.this_id;
}
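
// Cap for a single entry's data: 1/8 of the total cache size. The entry code
// rejects writes that would grow a stream past this limit.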
int BackendImpl::MaxFileSize() const {
  return max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

void BackendImpl::TooMuchStorageRequested(int32 size) {
  stats_.ModifyStorageStats(0, size);
}

bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(size, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}
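
// Produces histogram names of the form "DiskCache.<type>.<name>", where
// <type> is the numeric value of cache_type_, with "_<experiment>" appended
// when an experiment is active.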
std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
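  // uma_report_ caches the decision: 0 means undecided, 1 means we already
  // decided not to report this week, and 2 means we report this session.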
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }

  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  STRESS_NOTREACHED();
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
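  // Each tick moves the stored value 1/50th of the way toward the current
  // number of references (and always by at least 1).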
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}

void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
  background_queue_.FlushQueue(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::RunTaskForTest(const base::Closure& task,
                                const CompletionCallback& callback) {
  background_queue_.RunTask(task, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

void BackendImpl::FlushIndex() {
  if (index_.get() && !disabled_)
    index_->Flush();
}

// ------------------------------------------------------------------------

net::CacheType BackendImpl::GetCacheType() const {
  return cache_type_;
}

int32 BackendImpl::GetEntryCount() const {
  if (!index_.get() || disabled_)
    return 0;
  // num_entries includes entries already evicted.
  int32 not_deleted = data_->header.num_entries -
                      data_->header.lru.sizes[Rankings::DELETED];

  if (not_deleted < 0) {
    NOTREACHED();
    not_deleted = 0;
  }

  return not_deleted;
}

int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.CreateEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntry(const std::string& key,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntry(key, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomAllEntries(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                    const base::Time end_time,
                                    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesSince(initial_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenNextEntry(iter, next_entry, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::EndEnumeration(void** iter) {
  background_queue_.EndEnumeration(*iter);
  *iter = NULL;
}

void BackendImpl::GetStats(StatsItems* stats) {
  if (disabled_)
    return;

  std::pair<std::string, std::string> item;

  item.first = "Entries";
  item.second = base::StringPrintf("%d", data_->header.num_entries);
  stats->push_back(item);

  item.first = "Pending IO";
  item.second = base::StringPrintf("%d", num_pending_io_);
  stats->push_back(item);

  item.first = "Max size";
  item.second = base::StringPrintf("%d", max_size_);
  stats->push_back(item);

  item.first = "Current size";
  item.second = base::StringPrintf("%d", data_->header.num_bytes);
  stats->push_back(item);

  item.first = "Cache type";
  item.second = "Blockfile Cache";
  stats->push_back(item);

  stats_.GetItems(stats);
}

void BackendImpl::OnExternalCacheHit(const std::string& key) {
  background_queue_.OnExternalCacheHit(key);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!file_util::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferedCacheSize(available);

  // Let's not use more than the default size while we tune-up the performance
  // of bigger caches. TODO(rvargas): remove this limit.
  if (max_size_ > kDefaultCacheSize * 4)
    max_size_ = kDefaultCacheSize * 4;

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;

    data_->header.stats = address.value();
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

void BackendImpl::StoreStats() {
  int size = stats_.StorageSize();
  scoped_ptr<char[]> data(new char[size]);
  Addr address;
  size = stats_.SerializeStats(data.get(), size, &address);
  DCHECK(size);
  if (!address.is_initialized())
    return;

  MappedFile* file = File(address);
  if (!file)
    return;

  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  file->Write(data.get(), size, offset);  // ignore result.
}

void BackendImpl::RestartCache(bool failure) {
  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
  int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
  int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);

  PrepareForRestart();
  if (failure) {
    DCHECK(!num_refs_);
    DCHECK(!open_entries_.size());
    DelayedCacheCleanup(path_);
  } else {
    DeleteCache(path_, false);
  }

  // Don't call Init() if directed by the unit test: we are simulating a
  // failure trying to re-enable the cache.
  if (unit_test_)
    init_ = true;  // Let the destructor do proper cleanup.
  else if (SyncInit() == net::OK) {
    stats_.SetCounter(Stats::FATAL_ERROR, errors);
    stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
    stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
    stats_.SetCounter(Stats::LAST_REPORT, last_report);
  }
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}
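
// Opens the entry stored at |address|, returning 0 on success or a cache
// error code otherwise. On success the entry is AddRef'd, registered in
// open_entries_ and returned through |entry|.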
int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
    EntryImpl* this_entry = it->second;
    this_entry->AddRef();
    *entry = this_entry;
    return 0;
  }

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV2()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, address, read_only_));
  IncreaseNumRefs();
  *entry = NULL;

  TimeTicks start = TimeTicks::Now();
  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (IsLoaded()) {
    CACHE_UMA(AGE_MS, "LoadTime", 0, start);
  }

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
      Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    // Don't remove this from the list (it is not linked properly). Instead,
    // break the link back to the entry because it is going away, and leave the
    // rankings node to be deleted if we find it through a list.
    rankings_.SetContents(cache_entry->rankings(), 0);
  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    rankings_.SetContents(cache_entry->rankings(), address.value());
  }

  if (!cache_entry->DataSanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->SetDirtyFlag(0);
    cache_entry->FixForDelete();
  }

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->SetDirtyFlag(GetCurrentEntryId());

  if (cache_entry->dirty()) {
    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
          address.value());
  }

  open_entries_[address.value()] = cache_entry.get();

  cache_entry->BeginLogging(net_log_, false);
  cache_entry.swap(entry);
  return 0;
}
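
// Walks the collision chain that starts at the index bucket for |hash|,
// looking for the entry with |key| (or, with |find_parent|, for the entry
// that links to it). Dirty or unreadable entries found on the way are
// unlinked and destroyed and the walk restarts; |*match_error| reports that
// the chain had to be repaired.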
EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
                                   bool find_parent, Addr entry_addr,
                                   bool* match_error) {
  Addr address(data_->table[hash & mask_]);
  scoped_refptr<EntryImpl> cache_entry, parent_entry;
  EntryImpl* tmp = NULL;
  bool found = false;
  std::set<CacheAddr> visited;
  *match_error = false;

  for (;;) {
    if (disabled_)
      break;

    if (visited.find(address.value()) != visited.end()) {
      // It's possible for a buggy version of the code to write a loop. Just
      // break it.
      Trace("Hash collision loop 0x%x", address.value());
      address.set_value(0);
      parent_entry->SetNextAddress(address);
    }
    visited.insert(address.value());

    if (!address.is_initialized()) {
      if (find_parent)
        found = true;
      break;
    }

    int error = NewEntry(address, &tmp);
    cache_entry.swap(&tmp);

    if (error || cache_entry->dirty()) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      if (parent_entry.get()) {
        parent_entry->SetNextAddress(child);
        parent_entry = NULL;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
            address.value());

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(cache_entry.get());
        cache_entry = NULL;
      } else {
        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      visited.clear();
      continue;
    }

    DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
    if (cache_entry->IsSameEntry(key, hash)) {
      if (!cache_entry->Update())
        cache_entry = NULL;
      found = true;
      if (find_parent && entry_addr.value() != address.value()) {
        Trace("Entry not on the index 0x%x", address.value());
        *match_error = true;
        parent_entry = NULL;
      }
      break;
    }
    if (!cache_entry->Update())
      cache_entry = NULL;
    parent_entry = cache_entry;
    cache_entry = NULL;
    if (!parent_entry.get())
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  if (parent_entry.get() && (!find_parent || !found))
    parent_entry = NULL;

  if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
    *match_error = true;
    parent_entry = NULL;
  }

  if (cache_entry.get() && (find_parent || !found))
    cache_entry = NULL;

  find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
  FlushIndex();
  return tmp;
}

// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
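// The enumeration merges the three rankings lists by last-used time: each
// call advances the list that produced the previous entry and then returns
// the newest (forward) or oldest (backward) of the three candidates.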
EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
  if (disabled_)
    return NULL;

  DCHECK(iter);

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    iterator.reset(new Rankings::Iterator(&rankings_));
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &temp);
      entries[i].swap(&temp);  // The entry was already addref'd.
    }
    if (!ret)
      return NULL;
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      if (iterator->list == i) {
        OpenFollowingEntryFromList(forward, iterator->list,
                                   &iterator->nodes[i], &temp);
      } else {
        temp = GetEnumeratedEntry(iterator->nodes[i],
                                  static_cast<Rankings::List>(i));
      }

      entries[i].swap(&temp);  // The entry was already addref'd.
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK_LT(oldest, 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0)
    return NULL;

  EntryImpl* next_entry;
  if (forward) {
    next_entry = entries[newest].get();
    iterator->list = static_cast<Rankings::List>(newest);
  } else {
    next_entry = entries[oldest].get();
    iterator->list = static_cast<Rankings::List>(oldest);
  }

  *iter = iterator.release();
  next_entry->AddRef();
  return next_entry;
}

bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
                                             CacheRankingsBlock** from_entry,
                                             EntryImpl** next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = forward ?
      rankings_.GetNext(rankings.get(), list) :
      rankings_.GetPrev(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = NULL;

  *next_entry = GetEnumeratedEntry(next.get(), list);
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}
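
// Turns a rankings node into an open entry, dooming or unlinking nodes that
// fail validation along the way.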
EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
                                           Rankings::List list) {
  if (!next || disabled_)
    return NULL;

  EntryImpl* entry;
  int rv = NewEntry(Addr(next->Data()->contents), &entry);
  if (rv) {
    STRESS_NOTREACHED();
    rankings_.Remove(next, list, false);
    if (rv == ERR_INVALID_ADDRESS) {
      // There is nothing linked from the index. Delete the rankings node.
      DeleteBlock(next->address(), true);
    }
    return NULL;
  }

  if (entry->dirty()) {
    // We cannot trust this entry.
    InternalDoomEntry(entry);
    entry->Release();
    return NULL;
  }

  if (!entry->Update()) {
    STRESS_NOTREACHED();
    entry->Release();
    return NULL;
  }

  // Note that it is unfortunate (but possible) for this entry to be clean, but
  // not actually the real entry. In other words, we could have lost this entry
  // from the index, and it could have been replaced with a newer one. It's not
  // worth checking that this entry is "the real one", so we just return it and
  // let the enumeration continue; this entry will be evicted at some point, and
  // the regular path will work with the real entry. With time, this problem
  // will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  int hit_ratio_as_percentage = stats_.GetHitRatio();

  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  // For any bin in HitRatioBySize2, the hit ratio of caches of that size is
  // the ratio of that bin's total count to the count in the same bin in the
  // Size2 histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
  // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
  // time is the ratio of that bin's total count to the count in the same bin
  // in the TotalTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK(0x20000 == data_->header.version);
  data_->header.version = 0x20001;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}

int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
               " " << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
}

int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory.
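    // For example, with 8 GB of RAM that would be about 170 MB, well over the
    // 30 MB cap above, so the cap applies.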
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache