// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_index.h"

#include <algorithm>
#include <utility>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_tokenizer.h"
#include "base/task_runner.h"
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index_delegate.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"

#if defined(OS_ANDROID)
#include "base/android/application_status_listener.h"
#endif

namespace {

// How many milliseconds we delay writing the index to disk after the last
// cache operation has happened.
const int kWriteToDiskDelayMSecs = 20000;
const int kWriteToDiskOnBackgroundDelayMSecs = 100;

// Divides the cache space into this amount of parts to evict when only one
// part is left.
const uint32 kEvictionMarginDivisor = 20;

const uint32 kBytesInKb = 1024;

// Utility class used for timestamp comparisons in entry metadata while
// sorting.
class CompareHashesForTimestamp {
  typedef disk_cache::SimpleIndex SimpleIndex;
  typedef disk_cache::SimpleIndex::EntrySet EntrySet;

 public:
  explicit CompareHashesForTimestamp(const EntrySet& set);

  bool operator()(uint64 hash1, uint64 hash2);

 private:
  const EntrySet& entry_set_;
};

CompareHashesForTimestamp::CompareHashesForTimestamp(const EntrySet& set)
    : entry_set_(set) {
}

bool CompareHashesForTimestamp::operator()(uint64 hash1, uint64 hash2) {
  EntrySet::const_iterator it1 = entry_set_.find(hash1);
  DCHECK(it1 != entry_set_.end());
  EntrySet::const_iterator it2 = entry_set_.find(hash2);
  DCHECK(it2 != entry_set_.end());
  return it1->second.GetLastUsedTime() < it2->second.GetLastUsedTime();
}

}  // namespace

namespace disk_cache {

EntryMetadata::EntryMetadata()
    : last_used_time_seconds_since_epoch_(0),
      entry_size_(0) {
}

EntryMetadata::EntryMetadata(base::Time last_used_time, int entry_size)
    : last_used_time_seconds_since_epoch_(0),
      entry_size_(entry_size) {
  SetLastUsedTime(last_used_time);
}

base::Time EntryMetadata::GetLastUsedTime() const {
  // Preserve nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    return base::Time();

  return base::Time::UnixEpoch() +
      base::TimeDelta::FromSeconds(last_used_time_seconds_since_epoch_);
}

void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) {
  // Preserve nullity.
  if (last_used_time.is_null()) {
    last_used_time_seconds_since_epoch_ = 0;
    return;
  }

  last_used_time_seconds_since_epoch_ = base::checked_cast<uint32>(
      (last_used_time - base::Time::UnixEpoch()).InSeconds());
  // Avoid accidental nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    last_used_time_seconds_since_epoch_ = 1;
}

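// Note on the encoding above: last-used times are stored with one-second
// granularity as seconds since the Unix epoch, with 0 reserved for the null
// time; a non-null time that falls within the first second after the epoch is
// therefore stored as 1.
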
void EntryMetadata::Serialize(Pickle* pickle) const {
  DCHECK(pickle);
  int64 internal_last_used_time = GetLastUsedTime().ToInternalValue();
  pickle->WriteInt64(internal_last_used_time);
  pickle->WriteUInt64(entry_size_);
}

bool EntryMetadata::Deserialize(PickleIterator* it) {
  DCHECK(it);
  int64 tmp_last_used_time;
  uint64 tmp_entry_size;
  if (!it->ReadInt64(&tmp_last_used_time) || !it->ReadUInt64(&tmp_entry_size))
    return false;

  SetLastUsedTime(base::Time::FromInternalValue(tmp_last_used_time));
  entry_size_ = tmp_entry_size;
  return true;
}

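// Illustrative round trip through Serialize()/Deserialize() (hypothetical
// caller code, not part of this file):
//
//   Pickle pickle;
//   metadata.Serialize(&pickle);
//   PickleIterator it(pickle);
//   EntryMetadata restored;
//   bool ok = restored.Deserialize(&it);  // |restored| now equals |metadata|.
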
SimpleIndex::SimpleIndex(
    const scoped_refptr<base::SingleThreadTaskRunner>& io_thread,
    SimpleIndexDelegate* delegate,
    net::CacheType cache_type,
    scoped_ptr<SimpleIndexFile> index_file)
    : delegate_(delegate),
      cache_type_(cache_type),
      cache_size_(0),
      max_size_(0),
      high_watermark_(0),
      low_watermark_(0),
      eviction_in_progress_(false),
      initialized_(false),
      index_file_(index_file.Pass()),
      io_thread_(io_thread),
      // Creating the callback once so it is reused every time
      // write_to_disk_timer_.Start() is called.
      write_to_disk_cb_(base::Bind(&SimpleIndex::WriteToDisk, AsWeakPtr())),
      app_on_background_(false) {
}

SimpleIndex::~SimpleIndex() {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  // Fail all callbacks waiting for the index to come up.
  for (CallbackList::iterator it = to_run_when_initialized_.begin(),
       end = to_run_when_initialized_.end(); it != end; ++it) {
    it->Run(net::ERR_ABORTED);
  }
}

void SimpleIndex::Initialize(base::Time cache_mtime) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

#if defined(OS_ANDROID)
  if (base::android::IsVMInitialized()) {
    app_status_listener_.reset(new base::android::ApplicationStatusListener(
        base::Bind(&SimpleIndex::OnApplicationStateChange, AsWeakPtr())));
  }
#endif

  SimpleIndexLoadResult* load_result = new SimpleIndexLoadResult();
  scoped_ptr<SimpleIndexLoadResult> load_result_scoped(load_result);
  base::Closure reply = base::Bind(
      &SimpleIndex::MergeInitializingSet,
      AsWeakPtr(),
      base::Passed(&load_result_scoped));
  index_file_->LoadIndexEntries(cache_mtime, reply, load_result);
}

bool SimpleIndex::SetMaxSize(int max_bytes) {
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  max_size_ = max_bytes;
  high_watermark_ = max_size_ - max_size_ / kEvictionMarginDivisor;
  low_watermark_ = max_size_ - 2 * (max_size_ / kEvictionMarginDivisor);
  return true;
}

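// Example of the watermark arithmetic above: with max_bytes = 100 MB and
// kEvictionMarginDivisor = 20, high_watermark_ becomes 95 MB and
// low_watermark_ becomes 90 MB (see StartEvictionIfNeeded() for how they are
// used).
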
int SimpleIndex::ExecuteWhenReady(const net::CompletionCallback& task) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (initialized_)
    io_thread_->PostTask(FROM_HERE, base::Bind(task, net::OK));
  else
    to_run_when_initialized_.push_back(task);
  return net::ERR_IO_PENDING;
}

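// Note: ExecuteWhenReady() always reports net::ERR_IO_PENDING to its caller;
// |task| is either posted immediately with net::OK (index already initialized)
// or queued in |to_run_when_initialized_| and posted from
// MergeInitializingSet() once loading completes.
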
scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
    base::Time initial_time, base::Time end_time) {
  DCHECK_EQ(true, initialized_);

  if (!initial_time.is_null())
    initial_time -= EntryMetadata::GetLowerEpsilonForTimeComparisons();
  if (end_time.is_null())
    end_time = base::Time::Max();
  else
    end_time += EntryMetadata::GetUpperEpsilonForTimeComparisons();
  const base::Time extended_end_time =
      end_time.is_null() ? base::Time::Max() : end_time;
  DCHECK(extended_end_time >= initial_time);
  scoped_ptr<HashList> ret_hashes(new HashList());
  for (EntrySet::iterator it = entries_set_.begin(), end = entries_set_.end();
       it != end; ++it) {
    EntryMetadata& metadata = it->second;
    base::Time entry_time = metadata.GetLastUsedTime();
    if (initial_time <= entry_time && entry_time < extended_end_time)
      ret_hashes->push_back(it->first);
  }
  return ret_hashes.Pass();
}

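// Note: the requested time range above is widened by the epsilon helpers,
// presumably to compensate for the one-second granularity of the stored
// last-used times, so entries whose timestamps were truncated still match.
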
scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetAllHashes() {
  return GetEntriesBetween(base::Time(), base::Time());
}

int32 SimpleIndex::GetEntryCount() const {
  // TODO(pasko): return a meaningful initial estimate before initialized.
  return entries_set_.size();
}

void SimpleIndex::Insert(uint64 entry_hash) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // Upon insert we don't know yet the size of the entry.
  // It will be updated later when the SimpleEntryImpl finishes opening or
  // creating the new entry, and then UpdateEntrySize will be called.
  InsertInEntrySet(
      entry_hash, EntryMetadata(base::Time::Now(), 0), &entries_set_);
  if (!initialized_)
    removed_entries_.erase(entry_hash);
  PostponeWritingToDisk();
}

void SimpleIndex::Remove(uint64 entry_hash) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  EntrySet::iterator it = entries_set_.find(entry_hash);
  if (it != entries_set_.end()) {
    UpdateEntryIteratorSize(&it, 0);
    entries_set_.erase(it);
  }

  if (!initialized_)
    removed_entries_.insert(entry_hash);
  PostponeWritingToDisk();
}

bool SimpleIndex::Has(uint64 hash) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // If not initialized, always return true, forcing it to go to the disk.
  return !initialized_ || entries_set_.count(hash) > 0;
}

bool SimpleIndex::UseIfExists(uint64 entry_hash) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // Always update the last used time, even if it is during initialization.
  // It will be merged later.
  EntrySet::iterator it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    // If not initialized, always return true, forcing it to go to the disk.
    return !initialized_;
  it->second.SetLastUsedTime(base::Time::Now());
  PostponeWritingToDisk();
  return true;
}

void SimpleIndex::StartEvictionIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (eviction_in_progress_ || cache_size_ <= high_watermark_)
    return;

  // Take all live key hashes from the index and sort them by time.
  eviction_in_progress_ = true;
  eviction_start_time_ = base::TimeTicks::Now();
  SIMPLE_CACHE_UMA(MEMORY_KB,
                   "Eviction.CacheSizeOnStart2", cache_type_,
                   cache_size_ / kBytesInKb);
  SIMPLE_CACHE_UMA(MEMORY_KB,
                   "Eviction.MaxCacheSizeOnStart2", cache_type_,
                   max_size_ / kBytesInKb);
  std::vector<uint64> entry_hashes;
  entry_hashes.reserve(entries_set_.size());
  for (EntrySet::const_iterator it = entries_set_.begin(),
       end = entries_set_.end(); it != end; ++it) {
    entry_hashes.push_back(it->first);
  }
  std::sort(entry_hashes.begin(), entry_hashes.end(),
            CompareHashesForTimestamp(entries_set_));

  // Remove as many entries from the index as needed to get below
  // |low_watermark_|.
  std::vector<uint64>::iterator it = entry_hashes.begin();
  uint64 evicted_so_far_size = 0;
  while (evicted_so_far_size < cache_size_ - low_watermark_) {
    DCHECK(it != entry_hashes.end());
    EntrySet::iterator found_meta = entries_set_.find(*it);
    DCHECK(found_meta != entries_set_.end());
    uint64 to_evict_size = found_meta->second.GetEntrySize();
    evicted_so_far_size += to_evict_size;
    ++it;
  }

  // Take out the rest of hashes from the eviction list.
  entry_hashes.erase(it, entry_hashes.end());
  SIMPLE_CACHE_UMA(COUNTS,
                   "Eviction.EntryCount", cache_type_, entry_hashes.size());
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToSelectEntries", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);
  SIMPLE_CACHE_UMA(MEMORY_KB,
                   "Eviction.SizeOfEvicted2", cache_type_,
                   evicted_so_far_size / kBytesInKb);

  delegate_->DoomEntries(&entry_hashes, base::Bind(&SimpleIndex::EvictionDone,
                                                   AsWeakPtr()));
}

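// Worked example for the selection loop above: with max_size_ = 100 MB
// (low_watermark_ = 90 MB) and cache_size_ = 96 MB, the least recently used
// hashes are accumulated until their combined size reaches at least 6 MB
// (cache_size_ - low_watermark_); only those hashes are passed to
// DoomEntries().
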
bool SimpleIndex::UpdateEntrySize(uint64 entry_hash, int entry_size) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  EntrySet::iterator it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return false;

  UpdateEntryIteratorSize(&it, entry_size);
  PostponeWritingToDisk();
  StartEvictionIfNeeded();
  return true;
}

void SimpleIndex::EvictionDone(int result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  // Ignore the result of eviction. We did our best.
  eviction_in_progress_ = false;
  SIMPLE_CACHE_UMA(BOOLEAN, "Eviction.Result", cache_type_, result == net::OK);
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToDone", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);
  SIMPLE_CACHE_UMA(MEMORY_KB,
                   "Eviction.SizeWhenDone2", cache_type_,
                   cache_size_ / kBytesInKb);
}

// static
void SimpleIndex::InsertInEntrySet(
    uint64 entry_hash,
    const disk_cache::EntryMetadata& entry_metadata,
    EntrySet* entry_set) {
  DCHECK(entry_set);
  entry_set->insert(std::make_pair(entry_hash, entry_metadata));
}

void SimpleIndex::PostponeWritingToDisk() {
  if (!initialized_)
    return;  // Initialization is not done yet; the index is written later.
  const int delay = app_on_background_ ? kWriteToDiskOnBackgroundDelayMSecs
                                       : kWriteToDiskDelayMSecs;
  // If the timer is already active, Start() will just Reset it, postponing it.
  write_to_disk_timer_.Start(
      FROM_HERE, base::TimeDelta::FromMilliseconds(delay), write_to_disk_cb_);
}

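// Note: because Start() on an already-running timer resets it, every cache
// operation pushes the pending write further out, so the index is only
// flushed once the cache has been idle for kWriteToDiskDelayMSecs (or
// kWriteToDiskOnBackgroundDelayMSecs while the app is in the background).
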
void SimpleIndex::UpdateEntryIteratorSize(EntrySet::iterator* it,
                                          int entry_size) {
  // Update the total cache size with the new entry size.
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_GE(cache_size_, implicit_cast<uint64>((*it)->second.GetEntrySize()));
  cache_size_ -= (*it)->second.GetEntrySize();
  cache_size_ += entry_size;
  (*it)->second.SetEntrySize(entry_size);
}

void SimpleIndex::MergeInitializingSet(
    scoped_ptr<SimpleIndexLoadResult> load_result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(load_result->did_load);

  EntrySet* index_file_entries = &load_result->entries;

  for (base::hash_set<uint64>::const_iterator it = removed_entries_.begin();
       it != removed_entries_.end(); ++it) {
    index_file_entries->erase(*it);
  }
  removed_entries_.clear();

  for (EntrySet::const_iterator it = entries_set_.begin();
       it != entries_set_.end(); ++it) {
    const uint64 entry_hash = it->first;
    std::pair<EntrySet::iterator, bool> insert_result =
        index_file_entries->insert(EntrySet::value_type(entry_hash,
                                                        EntryMetadata()));
    EntrySet::iterator& possibly_inserted_entry = insert_result.first;
    possibly_inserted_entry->second = it->second;
  }

  uint64 merged_cache_size = 0;
  for (EntrySet::iterator it = index_file_entries->begin();
       it != index_file_entries->end(); ++it) {
    merged_cache_size += it->second.GetEntrySize();
  }

  entries_set_.swap(*index_file_entries);
  cache_size_ = merged_cache_size;
  initialized_ = true;

  // The actual IO is asynchronous, so calling WriteToDisk() shouldn't slow the
  // merge down much.
  if (load_result->flush_required)
    WriteToDisk();

  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "IndexInitializationWaiters", cache_type_,
                   to_run_when_initialized_.size(), 0, 100, 20);
  // Run all callbacks waiting for the index to come up.
  for (CallbackList::iterator it = to_run_when_initialized_.begin(),
       end = to_run_when_initialized_.end(); it != end; ++it) {
    io_thread_->PostTask(FROM_HERE, base::Bind((*it), net::OK));
  }
  to_run_when_initialized_.clear();
}

#if defined(OS_ANDROID)
void SimpleIndex::OnApplicationStateChange(
    base::android::ApplicationState state) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // For more info about android activities, see:
  // developer.android.com/training/basics/activity-lifecycle/pausing.html
  if (state == base::android::APPLICATION_STATE_HAS_RUNNING_ACTIVITIES) {
    app_on_background_ = false;
  } else if (state ==
      base::android::APPLICATION_STATE_HAS_STOPPED_ACTIVITIES) {
    app_on_background_ = true;
  }
}
#endif

void SimpleIndex::WriteToDisk() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (!initialized_)
    return;
  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "IndexNumEntriesOnWrite", cache_type_,
                   entries_set_.size(), 0, 100000, 50);
  const base::TimeTicks start = base::TimeTicks::Now();
  if (!last_write_to_disk_.is_null()) {
    if (app_on_background_) {
      SIMPLE_CACHE_UMA(MEDIUM_TIMES,
                       "IndexWriteInterval.Background", cache_type_,
                       start - last_write_to_disk_);
    } else {
      SIMPLE_CACHE_UMA(MEDIUM_TIMES,
                       "IndexWriteInterval.Foreground", cache_type_,
                       start - last_write_to_disk_);
    }
  }
  last_write_to_disk_ = start;

  index_file_->WriteToDisk(entries_set_, cache_size_,
                           start, app_on_background_);
}

}  // namespace disk_cache