// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_index.h"

#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_tokenizer.h"
#include "base/task_runner.h"
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index_delegate.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"

namespace {

// How many milliseconds we delay writing the index to disk since the last
// cache operation has happened.
const int kWriteToDiskDelayMSecs = 20000;
const int kWriteToDiskOnBackgroundDelayMSecs = 100;

// Divides the cache space into this amount of parts to evict when only one
// part is left.
const uint32 kEvictionMarginDivisor = 20;

const uint32 kBytesInKb = 1024;
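
// Note: the background delay is much shorter than the foreground one,
// presumably so that index updates reach disk quickly while the application
// is backgrounded and may be killed at any time; see PostponeWritingToDisk().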

// Utility class used for timestamp comparisons in entry metadata while
// sorting.
class CompareHashesForTimestamp {
  typedef disk_cache::SimpleIndex SimpleIndex;
  typedef disk_cache::SimpleIndex::EntrySet EntrySet;

 public:
  explicit CompareHashesForTimestamp(const EntrySet& set);

  bool operator()(uint64 hash1, uint64 hash2);

 private:
  const EntrySet& entry_set_;
};

CompareHashesForTimestamp::CompareHashesForTimestamp(const EntrySet& set)
    : entry_set_(set) {
}

bool CompareHashesForTimestamp::operator()(uint64 hash1, uint64 hash2) {
  EntrySet::const_iterator it1 = entry_set_.find(hash1);
  DCHECK(it1 != entry_set_.end());
  EntrySet::const_iterator it2 = entry_set_.find(hash2);
  DCHECK(it2 != entry_set_.end());
  return it1->second.GetLastUsedTime() < it2->second.GetLastUsedTime();
}

}  // namespace

namespace disk_cache {

EntryMetadata::EntryMetadata()
    : last_used_time_seconds_since_epoch_(0),
      entry_size_(0) {
}

EntryMetadata::EntryMetadata(base::Time last_used_time, uint64 entry_size)
    : last_used_time_seconds_since_epoch_(0),
      entry_size_(base::checked_cast<int32>(entry_size)) {
  SetLastUsedTime(last_used_time);
}
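
// The last-used time is stored as whole seconds since the Unix epoch in a
// uint32, with the value 0 reserved to represent a null base::Time; both the
// getter and the setter below preserve that convention.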

base::Time EntryMetadata::GetLastUsedTime() const {
  // Preserve nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    return base::Time();

  return base::Time::UnixEpoch() +
      base::TimeDelta::FromSeconds(last_used_time_seconds_since_epoch_);
}

void EntryMetadata::SetLastUsedTime(const base::Time& last_used_time) {
  // Preserve nullity.
  if (last_used_time.is_null()) {
    last_used_time_seconds_since_epoch_ = 0;
    return;
  }

  last_used_time_seconds_since_epoch_ = base::checked_cast<uint32>(
      (last_used_time - base::Time::UnixEpoch()).InSeconds());
  // Avoid accidental nullity.
  if (last_used_time_seconds_since_epoch_ == 0)
    last_used_time_seconds_since_epoch_ = 1;
}

uint64 EntryMetadata::GetEntrySize() const {
  return entry_size_;
}

void EntryMetadata::SetEntrySize(uint64 entry_size) {
  entry_size_ = base::checked_cast<int32>(entry_size);
}
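
// An EntryMetadata is pickled as an int64 last-used time (base::Time internal
// value) followed by a uint64 entry size; Deserialize() below reads the
// fields back in the same order.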

void EntryMetadata::Serialize(Pickle* pickle) const {
  int64 internal_last_used_time = GetLastUsedTime().ToInternalValue();
  pickle->WriteInt64(internal_last_used_time);
  pickle->WriteUInt64(entry_size_);
}

bool EntryMetadata::Deserialize(PickleIterator* it) {
  int64 tmp_last_used_time;
  uint64 tmp_entry_size;
  if (!it->ReadInt64(&tmp_last_used_time) || !it->ReadUInt64(&tmp_entry_size) ||
      tmp_entry_size > static_cast<uint64>(std::numeric_limits<int32>::max()))
    return false;
  SetLastUsedTime(base::Time::FromInternalValue(tmp_last_used_time));
  entry_size_ = static_cast<int32>(tmp_entry_size);
  return true;
}

SimpleIndex::SimpleIndex(
    const scoped_refptr<base::SingleThreadTaskRunner>& io_thread,
    SimpleIndexDelegate* delegate,
    net::CacheType cache_type,
    scoped_ptr<SimpleIndexFile> index_file)
    : delegate_(delegate),
      cache_type_(cache_type),
      cache_size_(0),
      max_size_(0),
      high_watermark_(0),
      low_watermark_(0),
      eviction_in_progress_(false),
      initialized_(false),
      index_file_(index_file.Pass()),
      io_thread_(io_thread),
      // Creating the callback once so it is reused every time
      // write_to_disk_timer_.Start() is called.
      write_to_disk_cb_(base::Bind(&SimpleIndex::WriteToDisk, AsWeakPtr())),
      app_on_background_(false) {
}

SimpleIndex::~SimpleIndex() {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  // Fail all callbacks waiting for the index to come up.
  for (CallbackList::iterator it = to_run_when_initialized_.begin(),
       end = to_run_when_initialized_.end(); it != end; ++it) {
    it->Run(net::ERR_ABORTED);
  }
}

void SimpleIndex::Initialize(base::Time cache_mtime) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

#if defined(OS_ANDROID)
  if (base::android::IsVMInitialized()) {
    app_status_listener_.reset(new base::android::ApplicationStatusListener(
        base::Bind(&SimpleIndex::OnApplicationStateChange, AsWeakPtr())));
  }
#endif

  SimpleIndexLoadResult* load_result = new SimpleIndexLoadResult();
  scoped_ptr<SimpleIndexLoadResult> load_result_scoped(load_result);
  base::Closure reply = base::Bind(
      &SimpleIndex::MergeInitializingSet,
      AsWeakPtr(),
      base::Passed(&load_result_scoped));
  index_file_->LoadIndexEntries(cache_mtime, reply, load_result);
}
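
// Example of the watermark arithmetic below: with kEvictionMarginDivisor ==
// 20 and a 100 MB |max_size_|, eviction (see StartEvictionIfNeeded()) starts
// once the cache exceeds 95 MB (|high_watermark_|) and trims it back below
// 90 MB (|low_watermark_|).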

void SimpleIndex::SetMaxSize(uint64 max_bytes) {
  // Zero size means use the default.
  if (max_bytes) {
    max_size_ = max_bytes;
    high_watermark_ = max_size_ - max_size_ / kEvictionMarginDivisor;
    low_watermark_ = max_size_ - 2 * (max_size_ / kEvictionMarginDivisor);
  }
}
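
// Note: ExecuteWhenReady() always reports ERR_IO_PENDING to the caller; the
// completion callback is either posted immediately (when the index is already
// initialized) or queued until MergeInitializingSet() runs.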

int SimpleIndex::ExecuteWhenReady(const net::CompletionCallback& task) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (initialized_)
    io_thread_->PostTask(FROM_HERE, base::Bind(task, net::OK));
  else
    to_run_when_initialized_.push_back(task);
  return net::ERR_IO_PENDING;
}
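
// The requested range below is widened by small epsilons (see EntryMetadata's
// Get{Lower,Upper}EpsilonForTimeComparisons()) because the index stores
// last-used times at a coarser granularity than base::Time; the slack keeps
// entries near the range boundaries from being missed.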

scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetEntriesBetween(
    base::Time initial_time, base::Time end_time) {
  DCHECK_EQ(true, initialized_);

  if (!initial_time.is_null())
    initial_time -= EntryMetadata::GetLowerEpsilonForTimeComparisons();
  if (end_time.is_null())
    end_time = base::Time::Max();
  else
    end_time += EntryMetadata::GetUpperEpsilonForTimeComparisons();
  const base::Time extended_end_time =
      end_time.is_null() ? base::Time::Max() : end_time;
  DCHECK(extended_end_time >= initial_time);
  scoped_ptr<HashList> ret_hashes(new HashList());
  for (EntrySet::iterator it = entries_set_.begin(), end = entries_set_.end();
       it != end; ++it) {
    EntryMetadata& metadata = it->second;
    base::Time entry_time = metadata.GetLastUsedTime();
    if (initial_time <= entry_time && entry_time < extended_end_time)
      ret_hashes->push_back(it->first);
  }
  return ret_hashes.Pass();
}

scoped_ptr<SimpleIndex::HashList> SimpleIndex::GetAllHashes() {
  return GetEntriesBetween(base::Time(), base::Time());
}

int32 SimpleIndex::GetEntryCount() const {
  // TODO(pasko): return a meaningful initial estimate before initialized.
  return entries_set_.size();
}
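
// While the index file is still loading, Remove() records removals in
// |removed_entries_| and Insert() cancels them, so that
// MergeInitializingSet() can reconcile the in-memory state with the loaded
// index.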

void SimpleIndex::Insert(uint64 entry_hash) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // Upon insert we don't know yet the size of the entry.
  // It will be updated later when the SimpleEntryImpl finishes opening or
  // creating the new entry, and then UpdateEntrySize will be called.
  InsertInEntrySet(
      entry_hash, EntryMetadata(base::Time::Now(), 0), &entries_set_);
  if (!initialized_)
    removed_entries_.erase(entry_hash);
  PostponeWritingToDisk();
}

void SimpleIndex::Remove(uint64 entry_hash) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  EntrySet::iterator it = entries_set_.find(entry_hash);
  if (it != entries_set_.end()) {
    UpdateEntryIteratorSize(&it, 0);
    entries_set_.erase(it);
  }

  if (!initialized_)
    removed_entries_.insert(entry_hash);
  PostponeWritingToDisk();
}

bool SimpleIndex::Has(uint64 hash) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // If not initialized, always return true, forcing it to go to the disk.
  return !initialized_ || entries_set_.count(hash) > 0;
}

bool SimpleIndex::UseIfExists(uint64 entry_hash) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // Always update the last used time, even if it is during initialization.
  // It will be merged later.
  EntrySet::iterator it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    // If not initialized, always return true, forcing it to go to the disk.
    return !initialized_;
  it->second.SetLastUsedTime(base::Time::Now());
  PostponeWritingToDisk();
  return true;
}
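
// Eviction works in three steps: sort all hashes by last-used time (oldest
// first), walk the sorted list accumulating entry sizes until enough bytes
// have been selected to bring the cache back under |low_watermark_|, then ask
// the delegate to doom exactly that prefix of entries.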

void SimpleIndex::StartEvictionIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (eviction_in_progress_ || cache_size_ <= high_watermark_)
    return;

  // Take all live key hashes from the index and sort them by time.
  eviction_in_progress_ = true;
  eviction_start_time_ = base::TimeTicks::Now();
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "Eviction.CacheSizeOnStart2", cache_type_,
      static_cast<base::HistogramBase::Sample>(cache_size_ / kBytesInKb));
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "Eviction.MaxCacheSizeOnStart2", cache_type_,
      static_cast<base::HistogramBase::Sample>(max_size_ / kBytesInKb));
  std::vector<uint64> entry_hashes;
  entry_hashes.reserve(entries_set_.size());
  for (EntrySet::const_iterator it = entries_set_.begin(),
       end = entries_set_.end(); it != end; ++it) {
    entry_hashes.push_back(it->first);
  }
  std::sort(entry_hashes.begin(), entry_hashes.end(),
            CompareHashesForTimestamp(entries_set_));

  // Remove as many entries from the index as needed to get below
  // |low_watermark_|.
  std::vector<uint64>::iterator it = entry_hashes.begin();
  uint64 evicted_so_far_size = 0;
  while (evicted_so_far_size < cache_size_ - low_watermark_) {
    DCHECK(it != entry_hashes.end());
    EntrySet::iterator found_meta = entries_set_.find(*it);
    DCHECK(found_meta != entries_set_.end());
    evicted_so_far_size += found_meta->second.GetEntrySize();
    ++it;
  }

  // Take out the rest of the hashes from the eviction list.
  entry_hashes.erase(it, entry_hashes.end());
  SIMPLE_CACHE_UMA(COUNTS,
                   "Eviction.EntryCount", cache_type_, entry_hashes.size());
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToSelectEntries", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "Eviction.SizeOfEvicted2", cache_type_,
      static_cast<base::HistogramBase::Sample>(
          evicted_so_far_size / kBytesInKb));

  delegate_->DoomEntries(&entry_hashes, base::Bind(&SimpleIndex::EvictionDone,
                                                   AsWeakPtr()));
}

bool SimpleIndex::UpdateEntrySize(uint64 entry_hash, int64 entry_size) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  EntrySet::iterator it = entries_set_.find(entry_hash);
  if (it == entries_set_.end())
    return false;

  UpdateEntryIteratorSize(&it, entry_size);
  PostponeWritingToDisk();
  StartEvictionIfNeeded();
  return true;
}

void SimpleIndex::EvictionDone(int result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  // Ignore the result of eviction. We did our best.
  eviction_in_progress_ = false;
  SIMPLE_CACHE_UMA(BOOLEAN, "Eviction.Result", cache_type_, result == net::OK);
  SIMPLE_CACHE_UMA(TIMES,
                   "Eviction.TimeToDone", cache_type_,
                   base::TimeTicks::Now() - eviction_start_time_);
  SIMPLE_CACHE_UMA(
      MEMORY_KB, "Eviction.SizeWhenDone2", cache_type_,
      static_cast<base::HistogramBase::Sample>(cache_size_ / kBytesInKb));
}

void SimpleIndex::InsertInEntrySet(
    uint64 entry_hash,
    const disk_cache::EntryMetadata& entry_metadata,
    EntrySet* entry_set) {
  entry_set->insert(std::make_pair(entry_hash, entry_metadata));
}

void SimpleIndex::PostponeWritingToDisk() {
  if (!initialized_)
    return;
  const int delay = app_on_background_ ? kWriteToDiskOnBackgroundDelayMSecs
                                       : kWriteToDiskDelayMSecs;
  // If the timer is already active, Start() will just Reset it, postponing it.
  write_to_disk_timer_.Start(
      FROM_HERE, base::TimeDelta::FromMilliseconds(delay), write_to_disk_cb_);
}

void SimpleIndex::UpdateEntryIteratorSize(EntrySet::iterator* it,
                                          uint64 entry_size) {
  // Update the total cache size with the new entry size.
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_GE(cache_size_, (*it)->second.GetEntrySize());
  cache_size_ -= (*it)->second.GetEntrySize();
  cache_size_ += entry_size;
  (*it)->second.SetEntrySize(entry_size);
}
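
// Reconciles the freshly loaded index with whatever happened in memory while
// it was loading: entries removed in the meantime are dropped, in-memory
// metadata overrides the loaded copy, and |cache_size_| is recomputed from
// the merged set.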

void SimpleIndex::MergeInitializingSet(
    scoped_ptr<SimpleIndexLoadResult> load_result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(load_result->did_load);

  EntrySet* index_file_entries = &load_result->entries;

  for (base::hash_set<uint64>::const_iterator it = removed_entries_.begin();
       it != removed_entries_.end(); ++it) {
    index_file_entries->erase(*it);
  }
  removed_entries_.clear();

  for (EntrySet::const_iterator it = entries_set_.begin();
       it != entries_set_.end(); ++it) {
    const uint64 entry_hash = it->first;
    std::pair<EntrySet::iterator, bool> insert_result =
        index_file_entries->insert(EntrySet::value_type(entry_hash,
                                                        EntryMetadata()));
    EntrySet::iterator& possibly_inserted_entry = insert_result.first;
    possibly_inserted_entry->second = it->second;
  }

  uint64 merged_cache_size = 0;
  for (EntrySet::iterator it = index_file_entries->begin();
       it != index_file_entries->end(); ++it) {
    merged_cache_size += it->second.GetEntrySize();
  }

  entries_set_.swap(*index_file_entries);
  cache_size_ = merged_cache_size;
  initialized_ = true;

  // The actual IO is asynchronous, so calling WriteToDisk() shouldn't slow the
  // merge down.
  if (load_result->flush_required)
    WriteToDisk();

  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "IndexInitializationWaiters", cache_type_,
                   to_run_when_initialized_.size(), 0, 100, 20);
  // Run all callbacks waiting for the index to come up.
  for (CallbackList::iterator it = to_run_when_initialized_.begin(),
       end = to_run_when_initialized_.end(); it != end; ++it) {
    io_thread_->PostTask(FROM_HERE, base::Bind((*it), net::OK));
  }
  to_run_when_initialized_.clear();
}

#if defined(OS_ANDROID)
void SimpleIndex::OnApplicationStateChange(
    base::android::ApplicationState state) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // For more info about android activities, see:
  // developer.android.com/training/basics/activity-lifecycle/pausing.html
  if (state == base::android::APPLICATION_STATE_HAS_RUNNING_ACTIVITIES) {
    app_on_background_ = false;
  } else if (state ==
      base::android::APPLICATION_STATE_HAS_STOPPED_ACTIVITIES) {
    app_on_background_ = true;
  }
}
#endif
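
// WriteToDisk() is normally reached through |write_to_disk_timer_| (see
// PostponeWritingToDisk()), and directly from MergeInitializingSet() when the
// loaded index requires an immediate flush; the interval histograms below are
// split by foreground/background state.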

void SimpleIndex::WriteToDisk() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (!initialized_)
    return;
  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "IndexNumEntriesOnWrite", cache_type_,
                   entries_set_.size(), 0, 100000, 50);
  const base::TimeTicks start = base::TimeTicks::Now();
  if (!last_write_to_disk_.is_null()) {
    if (app_on_background_) {
      SIMPLE_CACHE_UMA(MEDIUM_TIMES,
                       "IndexWriteInterval.Background", cache_type_,
                       start - last_write_to_disk_);
    } else {
      SIMPLE_CACHE_UMA(MEDIUM_TIMES,
                       "IndexWriteInterval.Foreground", cache_type_,
                       start - last_write_to_disk_);
    }
  }
  last_write_to_disk_ = start;

  index_file_->WriteToDisk(entries_set_, cache_size_,
                           start, app_on_background_, base::Closure());
}

}  // namespace disk_cache