// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_backend_impl.h"

#include <algorithm>
#include <cstdlib>
#include <functional>

#if defined(OS_POSIX)
#include <sys/resource.h>
#endif

#include "base/bind.h"
#include "base/callback.h"
#include "base/file_util.h"
#include "base/location.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/sparse_histogram.h"
#include "base/single_thread_task_runner.h"
#include "base/sys_info.h"
#include "base/task_runner_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/disk_cache/simple/simple_version_upgrade.h"

using base::Callback;
using base::Closure;
using base::FilePath;
using base::SequencedWorkerPool;
using base::Time;
using base::DirectoryExists;
using base::CreateDirectory;

namespace disk_cache {

namespace {

// Maximum number of concurrent worker pool threads, which also is the limit
// on concurrent IO (as we use one thread per IO request).
const int kDefaultMaxWorkerThreads = 50;

const char kThreadNamePrefix[] = "SimpleCache";

// Maximum fraction of the cache that one entry can consume.
const int kMaxFileRatio = 8;

// A global sequenced worker pool to use for launching all tasks.
SequencedWorkerPool* g_sequenced_worker_pool = NULL;

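// Lazily creates |g_sequenced_worker_pool| on first use. The thread count
// defaults to kDefaultMaxWorkerThreads, but can be overridden by the
// "SimpleCacheMaxThreads" field trial; the pool is deliberately leaked.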
void MaybeCreateSequencedWorkerPool() {
  if (!g_sequenced_worker_pool) {
    int max_worker_threads = kDefaultMaxWorkerThreads;

    const std::string thread_count_field_trial =
        base::FieldTrialList::FindFullName("SimpleCacheMaxThreads");
    if (!thread_count_field_trial.empty()) {
      max_worker_threads =
          std::max(1, std::atoi(thread_count_field_trial.c_str()));
    }

    g_sequenced_worker_pool = new SequencedWorkerPool(max_worker_threads,
                                                      kThreadNamePrefix);
    g_sequenced_worker_pool->AddRef();  // Leak it.
  }
}

bool g_fd_limit_histogram_has_been_populated = false;

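// Records the process's soft and hard limits on open file descriptors in
// histograms, at most once per process.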
void MaybeHistogramFdLimit(net::CacheType cache_type) {
  if (g_fd_limit_histogram_has_been_populated)
    return;

  // Used in histograms; add new entries at end.
  enum FdLimitStatus {
    FD_LIMIT_STATUS_UNSUPPORTED = 0,
    FD_LIMIT_STATUS_FAILED      = 1,
    FD_LIMIT_STATUS_SUCCEEDED   = 2,
    FD_LIMIT_STATUS_MAX         = 3
  };
  FdLimitStatus fd_limit_status = FD_LIMIT_STATUS_UNSUPPORTED;
  int soft_fd_limit = 0;
  int hard_fd_limit = 0;

#if defined(OS_POSIX)
  struct rlimit nofile;
  if (!getrlimit(RLIMIT_NOFILE, &nofile)) {
    soft_fd_limit = nofile.rlim_cur;
    hard_fd_limit = nofile.rlim_max;
    fd_limit_status = FD_LIMIT_STATUS_SUCCEEDED;
  } else {
    fd_limit_status = FD_LIMIT_STATUS_FAILED;
  }
#endif

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "FileDescriptorLimitStatus", cache_type,
                   fd_limit_status, FD_LIMIT_STATUS_MAX);
  if (fd_limit_status == FD_LIMIT_STATUS_SUCCEEDED) {
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitSoft", cache_type, soft_fd_limit);
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitHard", cache_type, hard_fd_limit);
  }

  g_fd_limit_histogram_has_been_populated = true;
}

// Detects if the files in the cache directory match the current disk cache
// backend type and version. If the directory contains no cache, initializes
// it with a fresh structure.
bool FileStructureConsistent(const base::FilePath& path) {
  if (!base::PathExists(path) && !base::CreateDirectory(path)) {
    LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
    return false;
  }
  return disk_cache::UpgradeSimpleCacheOnDisk(path);
}

// A context used by a BarrierCompletionCallback to track state.
struct BarrierContext {
  BarrierContext(int expected)
      : expected(expected),
        count(0),
        had_error(false) {}

  const int expected;
  int count;
  bool had_error;
};

void BarrierCompletionCallbackImpl(
    BarrierContext* context,
    const net::CompletionCallback& final_callback,
    int result) {
  DCHECK_GT(context->expected, context->count);
  if (context->had_error)
    return;
  if (result != net::OK) {
    context->had_error = true;
    final_callback.Run(result);
    return;
  }
  ++context->count;
  if (context->count == context->expected)
    final_callback.Run(net::OK);
}

// A barrier completion callback is a net::CompletionCallback that waits for
// |count| successful results before invoking |final_callback|. In the case of
// an error, the first error is passed to |final_callback| and all others
// are ignored.
net::CompletionCallback MakeBarrierCompletionCallback(
    int count,
    const net::CompletionCallback& final_callback) {
  BarrierContext* context = new BarrierContext(count);
  return base::Bind(&BarrierCompletionCallbackImpl,
                    base::Owned(context), final_callback);
}

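// Usage sketch (hypothetical caller code): to run |on_done| only after three
// individual dooms have completed, or after the first failure:
//   net::CompletionCallback barrier =
//       MakeBarrierCompletionCallback(3, on_done);
//   for (int i = 0; i < 3; ++i)
//     backend->DoomEntryFromHash(hashes[i], barrier);
// |backend|, |hashes| and |on_done| are illustrative names only.
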
// A short bindable thunk that ensures a completion callback is always called
// after running an operation asynchronously.
void RunOperationAndCallback(
    const Callback<int(const net::CompletionCallback&)>& operation,
    const net::CompletionCallback& operation_callback) {
  const int operation_result = operation.Run(operation_callback);
  if (operation_result != net::ERR_IO_PENDING)
    operation_callback.Run(operation_result);
}

void RecordIndexLoad(net::CacheType cache_type,
                     base::TimeTicks constructed_since,
                     int result) {
  const base::TimeDelta creation_to_index =
      base::TimeTicks::Now() - constructed_since;
  if (result == net::OK) {
    SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index);
  } else {
    SIMPLE_CACHE_UMA(TIMES,
                     "CreationToIndexFail", cache_type, creation_to_index);
  }
}

}  // namespace

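// An ActiveEntryProxy ties a SimpleEntryImpl to the backend's
// |active_entries_| map: when the entry drops its proxy, the corresponding
// hash is erased from the map (provided the backend is still alive).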
class SimpleBackendImpl::ActiveEntryProxy
    : public SimpleEntryImpl::ActiveEntryProxy {
 public:
  virtual ~ActiveEntryProxy() {
    if (backend_) {
      DCHECK_EQ(1U, backend_->active_entries_.count(entry_hash_));
      backend_->active_entries_.erase(entry_hash_);
    }
  }

  static scoped_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
      uint64 entry_hash,
      SimpleBackendImpl* backend) {
    scoped_ptr<SimpleEntryImpl::ActiveEntryProxy>
        proxy(new ActiveEntryProxy(entry_hash, backend));
    return proxy.Pass();
  }

 private:
  ActiveEntryProxy(uint64 entry_hash,
                   SimpleBackendImpl* backend)
      : entry_hash_(entry_hash),
        backend_(backend->AsWeakPtr()) {}

  uint64 entry_hash_;
  base::WeakPtr<SimpleBackendImpl> backend_;
};

SimpleBackendImpl::SimpleBackendImpl(
    const FilePath& path,
    int max_bytes,
    net::CacheType cache_type,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::NetLog* net_log)
    : path_(path),
      cache_type_(cache_type),
      cache_thread_(cache_thread),
      orig_max_size_(max_bytes),
      entry_operations_mode_(cache_type == net::DISK_CACHE ?
                                 SimpleEntryImpl::OPTIMISTIC_OPERATIONS :
                                 SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS),
      net_log_(net_log) {
  MaybeHistogramFdLimit(cache_type_);
}

SimpleBackendImpl::~SimpleBackendImpl() {
  index_->WriteToDisk();
}

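// Creates the shared worker pool on first use, builds the in-memory index and
// posts the on-disk structure check to |cache_thread_|; the outcome is
// reported asynchronously through |completion_callback|.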
int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
  MaybeCreateSequencedWorkerPool();

  worker_pool_ = g_sequenced_worker_pool->GetTaskRunnerWithShutdownBehavior(
      SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);

  index_.reset(new SimpleIndex(
      base::ThreadTaskRunnerHandle::Get(),
      this,
      cache_type_,
      make_scoped_ptr(new SimpleIndexFile(
          cache_thread_, worker_pool_.get(), cache_type_, path_))));
  index_->ExecuteWhenReady(
      base::Bind(&RecordIndexLoad, cache_type_, base::TimeTicks::Now()));

  PostTaskAndReplyWithResult(
      cache_thread_.get(),
      FROM_HERE,
      base::Bind(
          &SimpleBackendImpl::InitCacheStructureOnDisk, path_, orig_max_size_),
      base::Bind(&SimpleBackendImpl::InitializeIndex,
                 AsWeakPtr(),
                 completion_callback));
  return net::ERR_IO_PENDING;
}

bool SimpleBackendImpl::SetMaxSize(int max_bytes) {
  orig_max_size_ = max_bytes;
  return index_->SetMaxSize(max_bytes);
}

int SimpleBackendImpl::GetMaxFileSize() const {
  return index_->max_size() / kMaxFileRatio;
}

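// While an entry is being doomed, operations that target the same hash are
// queued as closures in |entries_pending_doom_| (see OnDoomStart) and are
// replayed once the doom finishes (see OnDoomComplete).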
void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) {
  // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
  CHECK_EQ(0u, entries_pending_doom_.count(entry_hash));
  entries_pending_doom_.insert(
      std::make_pair(entry_hash, std::vector<Closure>()));
}

void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash) {
  // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
  CHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  std::vector<Closure> to_run_closures;
  to_run_closures.swap(it->second);
  entries_pending_doom_.erase(it);

  std::for_each(to_run_closures.begin(), to_run_closures.end(),
                std::mem_fun_ref(&Closure::Run));
}

void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
                                    const net::CompletionCallback& callback) {
  scoped_ptr<std::vector<uint64> >
      mass_doom_entry_hashes(new std::vector<uint64>());
  mass_doom_entry_hashes->swap(*entry_hashes);

  std::vector<uint64> to_doom_individually_hashes;

  // For each of the entry hashes, there are two cases:
  // 1. The entry is either open or pending doom, and so it should be doomed
  //    individually to avoid flakes.
  // 2. The entry is not in use at all, so we can call
  //    SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
  for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
    const uint64 entry_hash = (*mass_doom_entry_hashes)[i];
    // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
    CHECK(active_entries_.count(entry_hash) == 0 ||
          entries_pending_doom_.count(entry_hash) == 0)
        << "The entry 0x" << std::hex << entry_hash
        << " is both active and pending doom.";
    if (!active_entries_.count(entry_hash) &&
        !entries_pending_doom_.count(entry_hash)) {
      continue;
    }

    to_doom_individually_hashes.push_back(entry_hash);

    (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
    mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
  }

  net::CompletionCallback barrier_callback =
      MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
                                    callback);
  for (std::vector<uint64>::const_iterator
           it = to_doom_individually_hashes.begin(),
           end = to_doom_individually_hashes.end(); it != end; ++it) {
    const int doom_result = DoomEntryFromHash(*it, barrier_callback);
    // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
    CHECK_EQ(net::ERR_IO_PENDING, doom_result);
    index_->Remove(*it);
  }

  for (std::vector<uint64>::const_iterator it = mass_doom_entry_hashes->begin(),
                                           end = mass_doom_entry_hashes->end();
       it != end; ++it) {
    index_->Remove(*it);
    OnDoomStart(*it);
  }

  // Taking this pointer here avoids undefined behaviour from calling
  // base::Passed before mass_doom_entry_hashes.get().
  std::vector<uint64>* mass_doom_entry_hashes_ptr =
      mass_doom_entry_hashes.get();
  PostTaskAndReplyWithResult(worker_pool_.get(),
                             FROM_HERE,
                             base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
                                        mass_doom_entry_hashes_ptr,
                                        path_),
                             base::Bind(&SimpleBackendImpl::DoomEntriesComplete,
                                        AsWeakPtr(),
                                        base::Passed(&mass_doom_entry_hashes),
                                        barrier_callback));
}

net::CacheType SimpleBackendImpl::GetCacheType() const {
  return net::DISK_CACHE;
}

int32 SimpleBackendImpl::GetEntryCount() const {
  // TODO(pasko): Use directory file count when index is not ready.
  return index_->GetEntryCount();
}

int SimpleBackendImpl::OpenEntry(const std::string& key,
                                 Entry** entry,
                                 const CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  // TODO(gavinp): Factor out this (not quite completely) repetitive code
  // block from OpenEntry/CreateEntry/DoomEntry.
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
                 AsWeakPtr(),
                 key,
                 entry,
                 simple_entry,
                 callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}

int SimpleBackendImpl::CreateEntry(const std::string& key,
                                   Entry** entry,
                                   const CompletionCallback& callback) {
  DCHECK_LT(0u, key.size());
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::CreateEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->CreateEntry(entry, callback);
}

int SimpleBackendImpl::DoomEntry(const std::string& key,
                                 const net::CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntry, base::Unretained(this), key);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->DoomEntry(callback);
}

int SimpleBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  return DoomEntriesBetween(Time(), Time(), callback);
}

void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
                                          Time end_time,
                                          const CompletionCallback& callback,
                                          int result) {
  if (result != net::OK) {
    callback.Run(result);
    return;
  }
  scoped_ptr<std::vector<uint64> > removed_key_hashes(
      index_->GetEntriesBetween(initial_time, end_time).release());
  DoomEntries(removed_key_hashes.get(), callback);
}

int SimpleBackendImpl::DoomEntriesBetween(
    const Time initial_time,
    const Time end_time,
    const CompletionCallback& callback) {
  return index_->ExecuteWhenReady(
      base::Bind(&SimpleBackendImpl::IndexReadyForDoom, AsWeakPtr(),
                 initial_time, end_time, callback));
}

int SimpleBackendImpl::DoomEntriesSince(
    const Time initial_time,
    const CompletionCallback& callback) {
  return DoomEntriesBetween(initial_time, Time(), callback);
}

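// Enumeration support: |*iter| points to a heap-allocated
// SimpleIndex::HashList of candidate hashes, created lazily once the index is
// ready; hashes are consumed from the back until one opens successfully.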
int SimpleBackendImpl::OpenNextEntry(void** iter,
                                     Entry** next_entry,
                                     const CompletionCallback& callback) {
  CompletionCallback get_next_entry =
      base::Bind(&SimpleBackendImpl::GetNextEntryInIterator, AsWeakPtr(), iter,
                 next_entry, callback);
  return index_->ExecuteWhenReady(get_next_entry);
}

void SimpleBackendImpl::EndEnumeration(void** iter) {
  SimpleIndex::HashList* entry_list =
      static_cast<SimpleIndex::HashList*>(*iter);
  delete entry_list;
  *iter = NULL;
}

void SimpleBackendImpl::GetStats(
    std::vector<std::pair<std::string, std::string> >* stats) {
  std::pair<std::string, std::string> item;
  item.first = "Cache type";
  item.second = "Simple Cache";
  stats->push_back(item);
}

void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
  index_->UseIfExists(simple_util::GetEntryHashKey(key));
}

void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback,
                                        const DiskStatResult& result) {
  if (result.net_error == net::OK) {
    index_->SetMaxSize(result.max_size);
    index_->Initialize(result.cache_dir_mtime);
  }
  callback.Run(result.net_error);
}

SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
    const base::FilePath& path,
    uint64 suggested_max_size) {
  DiskStatResult result;
  result.max_size = suggested_max_size;
  result.net_error = net::OK;
  if (!FileStructureConsistent(path)) {
    LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
               << path.LossyDisplayName();
    result.net_error = net::ERR_FAILED;
  } else {
    bool mtime_result =
        disk_cache::simple_util::GetMTime(path, &result.cache_dir_mtime);
    DCHECK(mtime_result);
    if (!result.max_size) {
      int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
      result.max_size = disk_cache::PreferredCacheSize(available);
    }
    DCHECK(result.max_size);
  }
  return result;
}

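// Returns the active entry for |entry_hash|, creating and registering a new
// SimpleEntryImpl in |active_entries_| if none exists. If the key of an
// already-active entry does not match (a hash collision), the colliding entry
// is doomed and the lookup is retried.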
scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
    const uint64 entry_hash,
    const std::string& key) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(entry_hash, NULL));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    SimpleEntryImpl* entry = it->second =
        new SimpleEntryImpl(cache_type_, path_, entry_hash,
                            entry_operations_mode_, this, net_log_);
    entry->SetKey(key);
    entry->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash, this));
  }
  DCHECK(it->second);
  // It's possible, but unlikely, that we have an entry hash collision with a
  // currently active entry.
  if (key != it->second->key()) {
    it->second->Doom();
    DCHECK_EQ(0U, active_entries_.count(entry_hash));
    return CreateOrFindActiveEntry(entry_hash, key);
  }
  return make_scoped_refptr(it->second);
}

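// Opens an entry identified only by its hash, as used by the enumeration code
// path where the key is not known up front.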
int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash,
                                         Entry** entry,
                                         const CompletionCallback& callback) {
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntryFromHash,
                   base::Unretained(this), entry_hash, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }

  EntryMap::iterator has_active = active_entries_.find(entry_hash);
  if (has_active != active_entries_.end()) {
    return OpenEntry(has_active->second->key(), entry, callback);
  }

  scoped_refptr<SimpleEntryImpl> simple_entry = new SimpleEntryImpl(
      cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
                 AsWeakPtr(), entry_hash, entry, simple_entry, callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}

int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash,
                                         const CompletionCallback& callback) {
  Entry** entry = new Entry*();
  scoped_ptr<Entry*> scoped_entry(entry);

  base::hash_map<uint64, std::vector<Closure> >::iterator pending_it =
      entries_pending_doom_.find(entry_hash);
  if (pending_it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntryFromHash,
                   base::Unretained(this), entry_hash);
    pending_it->second.push_back(base::Bind(&RunOperationAndCallback,
                                            operation, callback));
    return net::ERR_IO_PENDING;
  }

  EntryMap::iterator active_it = active_entries_.find(entry_hash);
  if (active_it != active_entries_.end())
    return active_it->second->DoomEntry(callback);

  // There are no pending dooms and no open entry. We can make a trivial
  // call to DoomEntries() to delete this entry.
  std::vector<uint64> entry_hash_vector;
  entry_hash_vector.push_back(entry_hash);
  DoomEntries(&entry_hash_vector, callback);
  return net::ERR_IO_PENDING;
}

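// Advances the enumeration: hashes that are no longer present in the index
// are skipped, and a failed open retries with the next hash via
// CheckIterationReturnValue.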
void SimpleBackendImpl::GetNextEntryInIterator(
    void** iter,
    Entry** next_entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code != net::OK) {
    callback.Run(error_code);
    return;
  }
  if (*iter == NULL) {
    *iter = index()->GetAllHashes().release();
  }
  SimpleIndex::HashList* entry_list =
      static_cast<SimpleIndex::HashList*>(*iter);
  while (entry_list->size() > 0) {
    uint64 entry_hash = entry_list->back();
    entry_list->pop_back();
    if (index()->Has(entry_hash)) {
      *next_entry = NULL;
      CompletionCallback continue_iteration = base::Bind(
          &SimpleBackendImpl::CheckIterationReturnValue,
          AsWeakPtr(),
          iter,
          next_entry,
          callback);
      int error_code_open = OpenEntryFromHash(entry_hash,
                                              next_entry,
                                              continue_iteration);
      if (error_code_open == net::ERR_IO_PENDING)
        return;
      if (error_code_open != net::ERR_FAILED) {
        callback.Run(error_code_open);
        return;
      }
    }
  }
  callback.Run(net::ERR_FAILED);
}

void SimpleBackendImpl::OnEntryOpenedFromHash(
    uint64 hash,
    Entry** entry,
    scoped_refptr<SimpleEntryImpl> simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code != net::OK) {
    callback.Run(error_code);
    return;
  }

  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(hash, simple_entry));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    // There was no active entry corresponding to this hash. We've already put
    // the entry opened from hash in the |active_entries_|. We now provide the
    // proxy object to the entry.
    it->second->SetActiveEntryProxy(ActiveEntryProxy::Create(hash, this));
    callback.Run(net::OK);
  } else {
    // The entry was made active while we were waiting for the open from hash
    // to finish. The entry created from hash needs to be closed, and the one
    // in |active_entries_| can be returned to the caller.
    simple_entry->Close();
    it->second->OpenEntry(entry, callback);
  }
}

void SimpleBackendImpl::OnEntryOpenedFromKey(
    const std::string key,
    Entry** entry,
    scoped_refptr<SimpleEntryImpl> simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  int final_code = error_code;
  if (final_code == net::OK) {
    bool key_matches = key.compare(simple_entry->key()) == 0;
    if (!key_matches) {
      // TODO(clamy): Add a unit test to check this code path.
      DLOG(WARNING) << "Key mismatch on open.";
      simple_entry->Doom();
      simple_entry->Close();
      final_code = net::ERR_FAILED;
    } else {
      DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
    }
    SIMPLE_CACHE_UMA(BOOLEAN, "KeyMatchedOnOpen", cache_type_, key_matches);
  }
  callback.Run(final_code);
}

void SimpleBackendImpl::CheckIterationReturnValue(
    void** iter,
    Entry** entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code == net::ERR_FAILED) {
    OpenNextEntry(iter, entry, callback);
    return;
  }
  callback.Run(error_code);
}

void SimpleBackendImpl::DoomEntriesComplete(
    scoped_ptr<std::vector<uint64> > entry_hashes,
    const net::CompletionCallback& callback,
    int result) {
  std::for_each(
      entry_hashes->begin(), entry_hashes->end(),
      std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete),
                   this));
  callback.Run(result);
}

void SimpleBackendImpl::FlushWorkerPoolForTesting() {
  if (g_sequenced_worker_pool)
    g_sequenced_worker_pool->FlushForTesting();
}

}  // namespace disk_cache