net/disk_cache/simple/simple_index.cc

// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_index.h"

#include <algorithm>
#include <limits>
#include <string>
#include <utility>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file_enumerator.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/pickle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_tokenizer.h"
#include "base/task_runner.h"
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index_delegate.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"

#if defined(OS_POSIX)
#include <sys/stat.h>
#include <sys/time.h>
#endif

namespace {

// How long, in milliseconds, we delay writing the index to disk after the
// last cache operation.
const int kDefaultWriteToDiskDelayMSecs = 20000;
const int kDefaultWriteToDiskOnBackgroundDelayMSecs = 100;

// The cache space is divided into this many parts; eviction starts when only
// one part's worth of free space remains.
const uint32 kEvictionMarginDivisor = 20;

const uint32 kBytesInKb = 1024;

// Utility class used for timestamp comparisons in entry metadata while
// sorting.
class CompareHashesForTimestamp {
  typedef disk_cache::SimpleIndex SimpleIndex;
  typedef disk_cache::SimpleIndex::EntrySet EntrySet;
 public:
  explicit CompareHashesForTimestamp(const EntrySet& set);

  bool operator()(uint64 hash1, uint64 hash2);
 private:
  const EntrySet& entry_set_;
};

CompareHashesForTimestamp::CompareHashesForTimestamp(const EntrySet& set)
    : entry_set_(set) {
}

bool CompareHashesForTimestamp::operator()(uint64 hash1, uint64 hash2) {
  EntrySet::const_iterator it1 = entry_set_.find(hash1);
  DCHECK(it1 != entry_set_.end());
  EntrySet::const_iterator it2 = entry_set_.find(hash2);
  DCHECK(it2 != entry_set_.end());
  return it1->second.GetLastUsedTime() < it2->second.GetLastUsedTime();
}

}  // namespace

namespace disk_cache {

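// In memory, EntryMetadata keeps the last-used time as whole seconds since
// the Unix epoch, with 0 reserved to mean "no timestamp"; SetLastUsedTime()
// below therefore bumps a time that falls exactly on the epoch to 1 second.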
EntryMetadata::EntryMetadata()
    : last_used_time_seconds_since_epoch_(0),
      entry_size_(0) {
}

EntryMetadata::EntryMetadata(base::Time last_used_time, int entry_size)
    : last_used_time_seconds_since_epoch_(0),
      entry_size_(entry_size) {
  SetLastUsedTime(last_used_time);
}

base::Time EntryMetadata::GetLastUsedTime() const {
  // Preserve nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    return base::Time();

  return base::Time::UnixEpoch() +
      base::TimeDelta::FromSeconds(last_used_time_seconds_since_epoch_);
}

void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) {
  // Preserve nullity.
  if (last_used_time.is_null()) {
    last_used_time_seconds_since_epoch_ = 0;
    return;
  }

  const base::TimeDelta since_unix_epoch =
      last_used_time - base::Time::UnixEpoch();
  const int64 seconds_since_unix_epoch = since_unix_epoch.InSeconds();
  DCHECK_LE(implicit_cast<int64>(std::numeric_limits<uint32>::min()),
            seconds_since_unix_epoch);
  DCHECK_GE(implicit_cast<int64>(std::numeric_limits<uint32>::max()),
            seconds_since_unix_epoch);

  last_used_time_seconds_since_epoch_ = seconds_since_unix_epoch;
  // Avoid accidental nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    last_used_time_seconds_since_epoch_ = 1;
}

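// On disk, an entry's metadata is pickled as an int64 last-used time
// (base::Time internal value) followed by a uint64 entry size. Since the
// in-memory time is truncated to whole seconds, timestamps round-trip with
// one-second precision.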
void EntryMetadata::Serialize(Pickle* pickle) const {
  DCHECK(pickle);
  int64 internal_last_used_time = GetLastUsedTime().ToInternalValue();
  pickle->WriteInt64(internal_last_used_time);
  pickle->WriteUInt64(entry_size_);
}

bool EntryMetadata::Deserialize(PickleIterator* it) {
  DCHECK(it);
  int64 tmp_last_used_time;
  uint64 tmp_entry_size;
  if (!it->ReadInt64(&tmp_last_used_time) || !it->ReadUInt64(&tmp_entry_size))
    return false;
  SetLastUsedTime(base::Time::FromInternalValue(tmp_last_used_time));
  entry_size_ = tmp_entry_size;
  return true;
}

SimpleIndex::SimpleIndex(base::SingleThreadTaskRunner* io_thread,
                         SimpleIndexDelegate* delegate,
                         net::CacheType cache_type,
                         scoped_ptr<SimpleIndexFile> index_file)
    : delegate_(delegate),
      cache_type_(cache_type),
      cache_size_(0),
      max_size_(0),
      high_watermark_(0),
      low_watermark_(0),
      eviction_in_progress_(false),
      initialized_(false),
      index_file_(index_file.Pass()),
      io_thread_(io_thread),
      // Create the callback once so it is reused every time
      // write_to_disk_timer_.Start() is called.
      write_to_disk_cb_(base::Bind(&SimpleIndex::WriteToDisk, AsWeakPtr())),
      app_on_background_(false) {}

SimpleIndex::~SimpleIndex() {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  // Fail all callbacks waiting for the index to come up.
  for (CallbackList::iterator it = to_run_when_initialized_.begin(),
       end = to_run_when_initialized_.end(); it != end; ++it) {
    it->Run(net::ERR_ABORTED);
  }
}

void SimpleIndex::Initialize(base::Time cache_mtime) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  // Take the foreground and background index flush delays from the experiment
  // settings only if both are valid.
  foreground_flush_delay_ = kDefaultWriteToDiskDelayMSecs;
  background_flush_delay_ = kDefaultWriteToDiskOnBackgroundDelayMSecs;
  const std::string index_flush_intervals = base::FieldTrialList::FindFullName(
      "SimpleCacheIndexFlushDelay_Foreground_Background");
  if (!index_flush_intervals.empty()) {
    base::StringTokenizer tokens(index_flush_intervals, "_");
    int foreground_delay, background_delay;
    if (tokens.GetNext() &&
        base::StringToInt(tokens.token(), &foreground_delay) &&
        tokens.GetNext() &&
        base::StringToInt(tokens.token(), &background_delay)) {
      foreground_flush_delay_ = foreground_delay;
      background_flush_delay_ = background_delay;
    }
  }

#if defined(OS_ANDROID)
  if (base::android::IsVMInitialized()) {
    app_status_listener_.reset(new base::android::ApplicationStatusListener(
        base::Bind(&SimpleIndex::OnApplicationStateChange, AsWeakPtr())));
  }
#endif

  SimpleIndexLoadResult* load_result = new SimpleIndexLoadResult();
  scoped_ptr<SimpleIndexLoadResult> load_result_scoped(load_result);
  base::Closure reply = base::Bind(
      &SimpleIndex::MergeInitializingSet,
      AsWeakPtr(),
      base::Passed(&load_result_scoped));
  index_file_->LoadIndexEntries(cache_mtime, reply, load_result);
}

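// With kEvictionMarginDivisor == 20 the watermarks sit at 95% and 90% of the
// configured maximum. For example, a 100 MB |max_size_| yields a
// |high_watermark_| of 95 MB (eviction starts once the cache grows past it,
// see StartEvictionIfNeeded()) and a |low_watermark_| of 90 MB (eviction
// selects entries until the cache would drop below it).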
bool SimpleIndex::SetMaxSize(int max_bytes) {
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  max_size_ = max_bytes;
  high_watermark_ = max_size_ - max_size_ / kEvictionMarginDivisor;
  low_watermark_ = max_size_ - 2 * (max_size_ / kEvictionMarginDivisor);
  return true;
}

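// ExecuteWhenReady() always returns net::ERR_IO_PENDING to its caller; |task|
// itself is posted with net::OK right away if the index is already
// initialized, queued in |to_run_when_initialized_| otherwise, and failed
// with net::ERR_ABORTED if the index is destroyed before initialization
// completes.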
int SimpleIndex::ExecuteWhenReady(const net::CompletionCallback& task) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (initialized_)
    io_thread_->PostTask(FROM_HERE, base::Bind(task, net::OK));
  else
    to_run_when_initialized_.push_back(task);
  return net::ERR_IO_PENDING;
}

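// GetEntriesBetween() returns the hashes of entries last used between
// |initial_time| and |end_time| (widened by small epsilons to compensate for
// the one-second timestamp granularity); a null time means "unbounded" on
// that side. Illustrative use (not taken from this file): collecting entries
// touched within the last hour could look like
//   scoped_ptr<SimpleIndex::HashList> recent = index->GetEntriesBetween(
//       base::Time::Now() - base::TimeDelta::FromHours(1), base::Time());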
scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
    base::Time initial_time, base::Time end_time) {
  DCHECK_EQ(true, initialized_);

  if (!initial_time.is_null())
    initial_time -= EntryMetadata::GetLowerEpsilonForTimeComparisons();
  if (end_time.is_null())
    end_time = base::Time::Max();
  else
    end_time += EntryMetadata::GetUpperEpsilonForTimeComparisons();
  const base::Time extended_end_time =
      end_time.is_null() ? base::Time::Max() : end_time;
  DCHECK(extended_end_time >= initial_time);
  scoped_ptr<HashList> ret_hashes(new HashList());
  for (EntrySet::iterator it = entries_set_.begin(), end = entries_set_.end();
       it != end; ++it) {
    EntryMetadata& metadata = it->second;
    base::Time entry_time = metadata.GetLastUsedTime();
    if (initial_time <= entry_time && entry_time < extended_end_time)
      ret_hashes->push_back(it->first);
  }
  return ret_hashes.Pass();
}

scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetAllHashes() {
  return GetEntriesBetween(base::Time(), base::Time());
}

int32 SimpleIndex::GetEntryCount() const {
  // TODO(pasko): return a meaningful initial estimate before initialized.
  return entries_set_.size();
}

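// While the index is still loading, Insert() and Remove() also maintain
// |removed_entries_| so that MergeInitializingSet() can reconcile in-memory
// changes with the entries read from disk: a removal recorded there erases
// the corresponding on-disk entry, and a later Insert() of the same hash
// cancels the pending removal.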
void SimpleIndex::Insert(uint64 entry_hash) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // Upon insert we don't yet know the size of the entry. It will be updated
  // later, when the SimpleEntryImpl finishes opening or creating the new
  // entry and UpdateEntrySize() is called.
  InsertInEntrySet(
      entry_hash, EntryMetadata(base::Time::Now(), 0), &entries_set_);
  if (!initialized_)
    removed_entries_.erase(entry_hash);
  PostponeWritingToDisk();
}

void SimpleIndex::Remove(uint64 entry_hash) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  EntrySet::iterator it = entries_set_.find(entry_hash);
  if (it != entries_set_.end()) {
    UpdateEntryIteratorSize(&it, 0);
    entries_set_.erase(it);
  }

  if (!initialized_)
    removed_entries_.insert(entry_hash);
  PostponeWritingToDisk();
}

bool SimpleIndex::Has(uint64 hash) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // If not initialized, always return true, forcing the caller to go to disk.
  return !initialized_ || entries_set_.count(hash) > 0;
}

bool SimpleIndex::UseIfExists(uint64 entry_hash) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // Always update the last used time, even if it is during initialization.
  // It will be merged later.
  EntrySet::iterator it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    // If not initialized, always return true, forcing the caller to go to
    // disk.
    return !initialized_;
  it->second.SetLastUsedTime(base::Time::Now());
  PostponeWritingToDisk();
  return true;
}

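// Eviction is approximately LRU: once |cache_size_| exceeds
// |high_watermark_|, all hashes are sorted by last-used time (oldest first),
// the oldest entries are selected until their combined size would bring the
// cache below |low_watermark_|, and the selected hashes are doomed
// asynchronously through the delegate; EvictionDone() then clears
// |eviction_in_progress_|.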
void SimpleIndex::StartEvictionIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (eviction_in_progress_ || cache_size_ <= high_watermark_)
    return;

  // Take all live key hashes from the index and sort them by time.
  eviction_in_progress_ = true;
  eviction_start_time_ = base::TimeTicks::Now();
  SIMPLE_CACHE_UMA(MEMORY_KB,
                   "Eviction.CacheSizeOnStart2", cache_type_,
                   cache_size_ / kBytesInKb);
  SIMPLE_CACHE_UMA(MEMORY_KB,
                   "Eviction.MaxCacheSizeOnStart2", cache_type_,
                   max_size_ / kBytesInKb);
  std::vector<uint64> entry_hashes;
  entry_hashes.reserve(entries_set_.size());
  for (EntrySet::const_iterator it = entries_set_.begin(),
       end = entries_set_.end(); it != end; ++it) {
    entry_hashes.push_back(it->first);
  }
  std::sort(entry_hashes.begin(), entry_hashes.end(),
            CompareHashesForTimestamp(entries_set_));

  // Select as many entries as needed to bring the cache below
  // |low_watermark_|.
  std::vector<uint64>::iterator it = entry_hashes.begin();
  uint64 evicted_so_far_size = 0;
  while (evicted_so_far_size < cache_size_ - low_watermark_) {
    DCHECK(it != entry_hashes.end());
    EntrySet::iterator found_meta = entries_set_.find(*it);
    DCHECK(found_meta != entries_set_.end());
    uint64 to_evict_size = found_meta->second.GetEntrySize();
    evicted_so_far_size += to_evict_size;
    ++it;
  }

  // Drop the remaining hashes from the eviction list.
  entry_hashes.erase(it, entry_hashes.end());
  SIMPLE_CACHE_UMA(COUNTS,
                   "Eviction.EntryCount", cache_type_, entry_hashes.size());
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToSelectEntries", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);
  SIMPLE_CACHE_UMA(MEMORY_KB,
                   "Eviction.SizeOfEvicted2", cache_type_,
                   evicted_so_far_size / kBytesInKb);

  delegate_->DoomEntries(&entry_hashes, base::Bind(&SimpleIndex::EvictionDone,
                                                   AsWeakPtr()));
}

bool SimpleIndex::UpdateEntrySize(uint64 entry_hash, int entry_size) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  EntrySet::iterator it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return false;

  UpdateEntryIteratorSize(&it, entry_size);
  PostponeWritingToDisk();
  StartEvictionIfNeeded();
  return true;
}

void SimpleIndex::EvictionDone(int result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  // Ignore the result of eviction. We did our best.
  eviction_in_progress_ = false;
  SIMPLE_CACHE_UMA(BOOLEAN, "Eviction.Result", cache_type_, result == net::OK);
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToDone", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);
  SIMPLE_CACHE_UMA(MEMORY_KB,
                   "Eviction.SizeWhenDone2", cache_type_,
                   cache_size_ / kBytesInKb);
}

// static
void SimpleIndex::InsertInEntrySet(
    uint64 entry_hash,
    const disk_cache::EntryMetadata& entry_metadata,
    EntrySet* entry_set) {
  DCHECK(entry_set);
  entry_set->insert(std::make_pair(entry_hash, entry_metadata));
}

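// Every index mutation restarts |write_to_disk_timer_|, so bursts of cache
// activity collapse into a single index write once the cache has been idle
// for |foreground_flush_delay_| ms (20 seconds by default), or for
// |background_flush_delay_| ms (100 ms by default) while the app is in the
// background on Android.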
void SimpleIndex::PostponeWritingToDisk() {
  if (!initialized_)
    return;
  const int delay = app_on_background_ ? background_flush_delay_
                                       : foreground_flush_delay_;
  // If the timer is already active, Start() simply resets it, postponing the
  // write.
  write_to_disk_timer_.Start(
      FROM_HERE, base::TimeDelta::FromMilliseconds(delay), write_to_disk_cb_);
}

void SimpleIndex::UpdateEntryIteratorSize(EntrySet::iterator* it,
                                          int entry_size) {
  // Update the total cache size with the new entry size.
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_GE(cache_size_, implicit_cast<uint64>((*it)->second.GetEntrySize()));
  cache_size_ -= (*it)->second.GetEntrySize();
  cache_size_ += entry_size;
  (*it)->second.SetEntrySize(entry_size);
}

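// MergeInitializingSet() reconciles the freshly loaded index with whatever
// happened while it was loading: hashes recorded in |removed_entries_| are
// dropped from the loaded set, metadata already present in |entries_set_|
// overrides the loaded metadata for the same hash, and |cache_size_| is
// recomputed from the merged result before the queued callbacks are run.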
void SimpleIndex::MergeInitializingSet(
    scoped_ptr<SimpleIndexLoadResult> load_result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(load_result->did_load);

  EntrySet* index_file_entries = &load_result->entries;

  for (base::hash_set<uint64>::const_iterator it = removed_entries_.begin();
       it != removed_entries_.end(); ++it) {
    index_file_entries->erase(*it);
  }
  removed_entries_.clear();

  for (EntrySet::const_iterator it = entries_set_.begin();
       it != entries_set_.end(); ++it) {
    const uint64 entry_hash = it->first;
    std::pair<EntrySet::iterator, bool> insert_result =
        index_file_entries->insert(EntrySet::value_type(entry_hash,
                                                        EntryMetadata()));
    EntrySet::iterator& possibly_inserted_entry = insert_result.first;
    possibly_inserted_entry->second = it->second;
  }

  uint64 merged_cache_size = 0;
  for (EntrySet::iterator it = index_file_entries->begin();
       it != index_file_entries->end(); ++it) {
    merged_cache_size += it->second.GetEntrySize();
  }

  entries_set_.swap(*index_file_entries);
  cache_size_ = merged_cache_size;
  initialized_ = true;

  // The actual IO is asynchronous, so calling WriteToDisk() here shouldn't
  // slow the merge down much.
  if (load_result->flush_required)
    WriteToDisk();

  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "IndexInitializationWaiters", cache_type_,
                   to_run_when_initialized_.size(), 0, 100, 20);
  // Run all callbacks waiting for the index to come up.
  for (CallbackList::iterator it = to_run_when_initialized_.begin(),
       end = to_run_when_initialized_.end(); it != end; ++it) {
    io_thread_->PostTask(FROM_HERE, base::Bind((*it), net::OK));
  }
  to_run_when_initialized_.clear();
}

#if defined(OS_ANDROID)
void SimpleIndex::OnApplicationStateChange(
    base::android::ApplicationState state) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // For more information about Android activity lifecycles, see:
  // developer.android.com/training/basics/activity-lifecycle/pausing.html
  if (state == base::android::APPLICATION_STATE_HAS_RUNNING_ACTIVITIES) {
    app_on_background_ = false;
  } else if (state ==
      base::android::APPLICATION_STATE_HAS_STOPPED_ACTIVITIES) {
    app_on_background_ = true;
    WriteToDisk();
  }
}
#endif

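// WriteToDisk() is reached from the delayed |write_to_disk_timer_|, from
// OnApplicationStateChange() when the app moves to the background, and from
// MergeInitializingSet() when the loaded index requires a flush; the actual
// file IO is performed asynchronously by |index_file_|.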
void SimpleIndex::WriteToDisk() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (!initialized_)
    return;
  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "IndexNumEntriesOnWrite", cache_type_,
                   entries_set_.size(), 0, 100000, 50);
  const base::TimeTicks start = base::TimeTicks::Now();
  if (!last_write_to_disk_.is_null()) {
    if (app_on_background_) {
      SIMPLE_CACHE_UMA(MEDIUM_TIMES,
                       "IndexWriteInterval.Background", cache_type_,
                       start - last_write_to_disk_);
    } else {
      SIMPLE_CACHE_UMA(MEDIUM_TIMES,
                       "IndexWriteInterval.Foreground", cache_type_,
                       start - last_write_to_disk_);
    }
  }
  last_write_to_disk_ = start;

  index_file_->WriteToDisk(entries_set_, cache_size_,
                           start, app_on_background_);
}

}  // namespace disk_cache