1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/simple/simple_backend_impl.h"
12 #include <sys/resource.h>
15 #include "base/bind.h"
16 #include "base/callback.h"
17 #include "base/files/file_util.h"
18 #include "base/location.h"
19 #include "base/metrics/field_trial.h"
20 #include "base/metrics/histogram.h"
21 #include "base/metrics/sparse_histogram.h"
22 #include "base/single_thread_task_runner.h"
23 #include "base/sys_info.h"
24 #include "base/task_runner_util.h"
25 #include "base/thread_task_runner_handle.h"
26 #include "base/threading/sequenced_worker_pool.h"
27 #include "base/time/time.h"
28 #include "net/base/net_errors.h"
29 #include "net/disk_cache/cache_util.h"
30 #include "net/disk_cache/simple/simple_entry_format.h"
31 #include "net/disk_cache/simple/simple_entry_impl.h"
32 #include "net/disk_cache/simple/simple_histogram_macros.h"
33 #include "net/disk_cache/simple/simple_index.h"
34 #include "net/disk_cache/simple/simple_index_file.h"
35 #include "net/disk_cache/simple/simple_synchronous_entry.h"
36 #include "net/disk_cache/simple/simple_util.h"
37 #include "net/disk_cache/simple/simple_version_upgrade.h"
42 using base::SequencedWorkerPool
;
44 using base::DirectoryExists
;
45 using base::CreateDirectory
;
47 namespace disk_cache
{
51 // Maximum number of concurrent worker pool threads, which also is the limit
52 // on concurrent IO (as we use one thread per IO request).
53 const int kDefaultMaxWorkerThreads
= 50;
// Name prefix given to the worker pool threads.
55 const char kThreadNamePrefix
[] = "SimpleCache";
57 // Maximum fraction of the cache that one entry can consume.
58 const int kMaxFileRatio
= 8;
60 // A global sequenced worker pool to use for launching all tasks.
// Created lazily by MaybeCreateSequencedWorkerPool() below and deliberately
// leaked (see the AddRef() call there).
61 SequencedWorkerPool
* g_sequenced_worker_pool
= NULL
;
// Creates the global worker pool the first time it is called; later calls
// are no-ops. The thread count defaults to kDefaultMaxWorkerThreads and can
// be overridden through the "SimpleCacheMaxThreads" field trial.
63 void MaybeCreateSequencedWorkerPool() {
64 if (!g_sequenced_worker_pool
) {
65 int max_worker_threads
= kDefaultMaxWorkerThreads
;
67 const std::string thread_count_field_trial
=
68 base::FieldTrialList::FindFullName("SimpleCacheMaxThreads");
69 if (!thread_count_field_trial
.empty()) {
// Field-trial override, clamped to at least one thread.
// NOTE(review): the assignment target of this expression is not visible in
// this copy of the file (it appears truncated); confirm against upstream.
71 std::max(1, std::atoi(thread_count_field_trial
.c_str()));
74 g_sequenced_worker_pool
= new SequencedWorkerPool(max_worker_threads
,
// The pool is intentionally kept alive for the whole process lifetime.
76 g_sequenced_worker_pool
->AddRef(); // Leak it.
// Set once MaybeHistogramFdLimit() has reported, so the file-descriptor
// limit histograms are emitted at most once per process.
80 bool g_fd_limit_histogram_has_been_populated
= false;
// Records UMA histograms describing the process file-descriptor limit
// (queried through POSIX getrlimit(RLIMIT_NOFILE)). Reports at most once
// per process, guarded by g_fd_limit_histogram_has_been_populated.
82 void MaybeHistogramFdLimit(net::CacheType cache_type
) {
83 if (g_fd_limit_histogram_has_been_populated
)
86 // Used in histograms; add new entries at end.
88 FD_LIMIT_STATUS_UNSUPPORTED
= 0,
89 FD_LIMIT_STATUS_FAILED
= 1,
90 FD_LIMIT_STATUS_SUCCEEDED
= 2,
91 FD_LIMIT_STATUS_MAX
= 3
93 FdLimitStatus fd_limit_status
= FD_LIMIT_STATUS_UNSUPPORTED
;
94 int soft_fd_limit
= 0;
95 int hard_fd_limit
= 0;
// getrlimit() returns 0 on success.
99 if (!getrlimit(RLIMIT_NOFILE
, &nofile
)) {
100 soft_fd_limit
= nofile
.rlim_cur
;
101 hard_fd_limit
= nofile
.rlim_max
;
102 fd_limit_status
= FD_LIMIT_STATUS_SUCCEEDED
;
104 fd_limit_status
= FD_LIMIT_STATUS_FAILED
;
// The status is always reported; the actual limit values only on success.
108 SIMPLE_CACHE_UMA(ENUMERATION
,
109 "FileDescriptorLimitStatus", cache_type
,
110 fd_limit_status
, FD_LIMIT_STATUS_MAX
);
111 if (fd_limit_status
== FD_LIMIT_STATUS_SUCCEEDED
) {
112 SIMPLE_CACHE_UMA(SPARSE_SLOWLY
,
113 "FileDescriptorLimitSoft", cache_type
, soft_fd_limit
);
114 SIMPLE_CACHE_UMA(SPARSE_SLOWLY
,
115 "FileDescriptorLimitHard", cache_type
, hard_fd_limit
);
118 g_fd_limit_histogram_has_been_populated
= true;
121 // Detects if the files in the cache directory match the current disk cache
122 // backend type and version. If the directory contains no cache, occupies it
123 // with the fresh structure.
// Returns false when the directory cannot be created; otherwise delegates
// the version/type check (and any needed upgrade) to the upgrade routine.
124 bool FileStructureConsistent(const base::FilePath
& path
) {
125 if (!base::PathExists(path
) && !base::CreateDirectory(path
)) {
126 LOG(ERROR
) << "Failed to create directory: " << path
.LossyDisplayName();
129 return disk_cache::UpgradeSimpleCacheOnDisk(path
);
132 // A context used by a BarrierCompletionCallback to track state.
// NOTE(review): the member declarations and the remainder of this struct are
// not visible in this copy (it appears truncated). The callback below uses
// members |expected|, |count| and |had_error|.
133 struct BarrierContext
{
134 BarrierContext(int expected
)
135 : expected(expected
),
// Accumulates completion results for a barrier callback: the first error is
// forwarded immediately to |final_callback|; otherwise net::OK is forwarded
// once the expected number of successes has arrived. See
// MakeBarrierCompletionCallback() below for how |context| is owned.
144 void BarrierCompletionCallbackImpl(
145 BarrierContext
* context
,
146 const net::CompletionCallback
& final_callback
,
148 DCHECK_GT(context
->expected
, context
->count
);
// Once an error has been reported, all later results are ignored.
149 if (context
->had_error
)
151 if (result
!= net::OK
) {
152 context
->had_error
= true;
153 final_callback
.Run(result
);
157 if (context
->count
== context
->expected
)
158 final_callback
.Run(net::OK
);
161 // A barrier completion callback is a net::CompletionCallback that waits for
162 // |count| successful results before invoking |final_callback|. In the case of
163 // an error, the first error is passed to |final_callback| and all others
// The returned callback owns the heap-allocated BarrierContext via
// base::Owned().
// NOTE(review): the |count| parameter line of this signature is not visible
// in this copy (it appears truncated); |count| is used in the body below.
165 net::CompletionCallback
MakeBarrierCompletionCallback(
167 const net::CompletionCallback
& final_callback
) {
168 BarrierContext
* context
= new BarrierContext(count
);
169 return base::Bind(&BarrierCompletionCallbackImpl
,
170 base::Owned(context
), final_callback
);
173 // A short bindable thunk that ensures a completion callback is always called
174 // after running an operation asynchronously.
175 void RunOperationAndCallback(
176 const Callback
<int(const net::CompletionCallback
&)>& operation
,
177 const net::CompletionCallback
& operation_callback
) {
178 const int operation_result
= operation
.Run(operation_callback
);
179 if (operation_result
!= net::ERR_IO_PENDING
)
180 operation_callback
.Run(operation_result
);
// Records, in UMA, the elapsed time between backend construction
// (|constructed_since|) and the index finishing its load; the sample goes to
// a success or failure histogram depending on |result|.
183 void RecordIndexLoad(net::CacheType cache_type
,
184 base::TimeTicks constructed_since
,
186 const base::TimeDelta creation_to_index
= base::TimeTicks::Now() -
188 if (result
== net::OK
) {
189 SIMPLE_CACHE_UMA(TIMES
, "CreationToIndex", cache_type
, creation_to_index
);
191 SIMPLE_CACHE_UMA(TIMES
,
192 "CreationToIndexFail", cache_type
, creation_to_index
);
// Proxy object handed to each live SimpleEntryImpl. Its destructor removes
// the entry's slot from the owning backend's |active_entries_| map, so the
// map stays in sync with entry lifetime. Holds the backend by WeakPtr, so
// destruction after the backend is gone is safe.
198 class SimpleBackendImpl::ActiveEntryProxy
199 : public SimpleEntryImpl::ActiveEntryProxy
{
201 ~ActiveEntryProxy() override
{
203 DCHECK_EQ(1U, backend_
->active_entries_
.count(entry_hash_
));
204 backend_
->active_entries_
.erase(entry_hash_
);
// Factory: builds a proxy for |entry_hash| holding a weak pointer to
// |backend|.
208 static scoped_ptr
<SimpleEntryImpl::ActiveEntryProxy
> Create(
210 SimpleBackendImpl
* backend
) {
211 scoped_ptr
<SimpleEntryImpl::ActiveEntryProxy
>
212 proxy(new ActiveEntryProxy(entry_hash
, backend
));
217 ActiveEntryProxy(uint64 entry_hash
,
218 SimpleBackendImpl
* backend
)
219 : entry_hash_(entry_hash
),
220 backend_(backend
->AsWeakPtr()) {}
223 base::WeakPtr
<SimpleBackendImpl
> backend_
;
// Constructs the backend. Heavy initialization happens asynchronously in
// Init(); here only members are set and the FD-limit histogram is (maybe)
// recorded. Optimistic entry operations are enabled only for
// net::DISK_CACHE caches.
226 SimpleBackendImpl::SimpleBackendImpl(
227 const FilePath
& path
,
229 net::CacheType cache_type
,
230 const scoped_refptr
<base::SingleThreadTaskRunner
>& cache_thread
,
231 net::NetLog
* net_log
)
233 cache_type_(cache_type
),
234 cache_thread_(cache_thread
),
235 orig_max_size_(max_bytes
),
236 entry_operations_mode_(cache_type
== net::DISK_CACHE
?
237 SimpleEntryImpl::OPTIMISTIC_OPERATIONS
:
238 SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS
),
240 MaybeHistogramFdLimit(cache_type_
);
243 SimpleBackendImpl::~SimpleBackendImpl() {
244 index_
->WriteToDisk();
// Starts asynchronous initialization: obtains a task runner from the shared
// worker pool, creates the index (and hooks RecordIndexLoad to its
// readiness), and posts the on-disk structure scan to the pool. Always
// completes later through |completion_callback| (via InitializeIndex) and
// therefore returns net::ERR_IO_PENDING.
247 int SimpleBackendImpl::Init(const CompletionCallback
& completion_callback
) {
248 MaybeCreateSequencedWorkerPool();
250 worker_pool_
= g_sequenced_worker_pool
->GetTaskRunnerWithShutdownBehavior(
251 SequencedWorkerPool::CONTINUE_ON_SHUTDOWN
);
253 index_
.reset(new SimpleIndex(
254 base::ThreadTaskRunnerHandle::Get(),
257 make_scoped_ptr(new SimpleIndexFile(
258 cache_thread_
, worker_pool_
.get(), cache_type_
, path_
))));
259 index_
->ExecuteWhenReady(
260 base::Bind(&RecordIndexLoad
, cache_type_
, base::TimeTicks::Now()));
// Disk scan runs on the worker pool; InitializeIndex() replies back here.
262 PostTaskAndReplyWithResult(
266 &SimpleBackendImpl::InitCacheStructureOnDisk
, path_
, orig_max_size_
),
267 base::Bind(&SimpleBackendImpl::InitializeIndex
,
269 completion_callback
));
270 return net::ERR_IO_PENDING
;
273 bool SimpleBackendImpl::SetMaxSize(int max_bytes
) {
274 orig_max_size_
= max_bytes
;
275 return index_
->SetMaxSize(max_bytes
);
278 int SimpleBackendImpl::GetMaxFileSize() const {
279 return index_
->max_size() / kMaxFileRatio
;
282 void SimpleBackendImpl::OnDoomStart(uint64 entry_hash
) {
283 DCHECK_EQ(0u, entries_pending_doom_
.count(entry_hash
));
284 entries_pending_doom_
.insert(
285 std::make_pair(entry_hash
, std::vector
<Closure
>()));
288 void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash
) {
289 DCHECK_EQ(1u, entries_pending_doom_
.count(entry_hash
));
290 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
291 entries_pending_doom_
.find(entry_hash
);
292 std::vector
<Closure
> to_run_closures
;
293 to_run_closures
.swap(it
->second
);
294 entries_pending_doom_
.erase(it
);
296 std::for_each(to_run_closures
.begin(), to_run_closures
.end(),
297 std::mem_fun_ref(&Closure::Run
));
// Dooms the given set of entry hashes, emptying |entry_hashes|. Hashes with
// an open entry or an in-flight doom are doomed individually; the remainder
// are deleted en masse by SimpleSynchronousEntry::DoomEntrySet on the worker
// pool. |callback| fires once, after every piece completes, via a barrier
// completion callback.
300 void SimpleBackendImpl::DoomEntries(std::vector
<uint64
>* entry_hashes
,
301 const net::CompletionCallback
& callback
) {
302 scoped_ptr
<std::vector
<uint64
> >
303 mass_doom_entry_hashes(new std::vector
<uint64
>());
304 mass_doom_entry_hashes
->swap(*entry_hashes
);
306 std::vector
<uint64
> to_doom_individually_hashes
;
308 // For each of the entry hashes, there are two cases:
309 // 1. The entry is either open or pending doom, and so it should be doomed
310 //    individually to avoid flakes.
311 // 2. The entry is not in use at all, so we can call
312 //    SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
// Iterate backwards so the swap-with-back removal below never revisits or
// skips an element.
313 for (int i
= mass_doom_entry_hashes
->size() - 1; i
>= 0; --i
) {
314 const uint64 entry_hash
= (*mass_doom_entry_hashes
)[i
];
315 DCHECK(active_entries_
.count(entry_hash
) == 0 ||
316 entries_pending_doom_
.count(entry_hash
) == 0);
317 if (!active_entries_
.count(entry_hash
) &&
318 !entries_pending_doom_
.count(entry_hash
)) {
322 to_doom_individually_hashes
.push_back(entry_hash
);
// Remove the in-use hash from the mass-doom list: overwrite with the last
// element and shrink by one.
324 (*mass_doom_entry_hashes
)[i
] = mass_doom_entry_hashes
->back();
325 mass_doom_entry_hashes
->resize(mass_doom_entry_hashes
->size() - 1);
// Barrier waits for each individual doom plus the mass doom (hence +1).
328 net::CompletionCallback barrier_callback
=
329 MakeBarrierCompletionCallback(to_doom_individually_hashes
.size() + 1,
331 for (std::vector
<uint64
>::const_iterator
332 it
= to_doom_individually_hashes
.begin(),
333 end
= to_doom_individually_hashes
.end(); it
!= end
; ++it
) {
334 const int doom_result
= DoomEntryFromHash(*it
, barrier_callback
);
335 DCHECK_EQ(net::ERR_IO_PENDING
, doom_result
);
// NOTE(review): the body of this loop is not visible in this copy of the
// file; presumably it marks each mass-doomed hash as pending. Confirm
// against upstream.
339 for (std::vector
<uint64
>::const_iterator it
= mass_doom_entry_hashes
->begin(),
340 end
= mass_doom_entry_hashes
->end();
346 // Taking this pointer here avoids undefined behaviour from calling
347 // base::Passed before mass_doom_entry_hashes.get().
348 std::vector
<uint64
>* mass_doom_entry_hashes_ptr
=
349 mass_doom_entry_hashes
.get();
350 PostTaskAndReplyWithResult(worker_pool_
.get(),
352 base::Bind(&SimpleSynchronousEntry::DoomEntrySet
,
353 mass_doom_entry_hashes_ptr
,
355 base::Bind(&SimpleBackendImpl::DoomEntriesComplete
,
357 base::Passed(&mass_doom_entry_hashes
),
361 net::CacheType
SimpleBackendImpl::GetCacheType() const {
362 return net::DISK_CACHE
;
365 int32
SimpleBackendImpl::GetEntryCount() const {
366 // TODO(pasko): Use directory file count when index is not ready.
367 return index_
->GetEntryCount();
// Opens the entry for |key|. If a doom is pending on this key's hash, the
// open is queued (via RunOperationAndCallback) until the doom completes;
// otherwise it is forwarded to the active — or newly created —
// SimpleEntryImpl, with OnEntryOpenedFromKey() verifying the result.
370 int SimpleBackendImpl::OpenEntry(const std::string
& key
,
372 const CompletionCallback
& callback
) {
373 const uint64 entry_hash
= simple_util::GetEntryHashKey(key
);
375 // TODO(gavinp): Factor out this (not quite completely) repetitive code
376 // block from OpenEntry/CreateEntry/DoomEntry.
377 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
378 entries_pending_doom_
.find(entry_hash
);
379 if (it
!= entries_pending_doom_
.end()) {
// A doom is in flight for this hash: queue a retry of this OpenEntry call.
380 Callback
<int(const net::CompletionCallback
&)> operation
=
381 base::Bind(&SimpleBackendImpl::OpenEntry
,
382 base::Unretained(this), key
, entry
);
383 it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
384 operation
, callback
));
385 return net::ERR_IO_PENDING
;
387 scoped_refptr
<SimpleEntryImpl
> simple_entry
=
388 CreateOrFindActiveEntry(entry_hash
, key
);
389 CompletionCallback backend_callback
=
390 base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey
,
396 return simple_entry
->OpenEntry(entry
, backend_callback
);
// Creates the entry for |key| (which must be non-empty). Mirrors OpenEntry:
// queues behind a pending doom for the same hash, otherwise forwards the
// create to the active or newly created SimpleEntryImpl.
399 int SimpleBackendImpl::CreateEntry(const std::string
& key
,
401 const CompletionCallback
& callback
) {
402 DCHECK_LT(0u, key
.size());
403 const uint64 entry_hash
= simple_util::GetEntryHashKey(key
);
405 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
406 entries_pending_doom_
.find(entry_hash
);
407 if (it
!= entries_pending_doom_
.end()) {
// A doom is in flight for this hash: queue a retry of this CreateEntry
// call.
408 Callback
<int(const net::CompletionCallback
&)> operation
=
409 base::Bind(&SimpleBackendImpl::CreateEntry
,
410 base::Unretained(this), key
, entry
);
411 it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
412 operation
, callback
));
413 return net::ERR_IO_PENDING
;
415 scoped_refptr
<SimpleEntryImpl
> simple_entry
=
416 CreateOrFindActiveEntry(entry_hash
, key
);
417 return simple_entry
->CreateEntry(entry
, callback
);
// Dooms the entry for |key|. Queues behind an already-pending doom of the
// same hash; otherwise forwards the doom to the active or newly created
// SimpleEntryImpl.
420 int SimpleBackendImpl::DoomEntry(const std::string
& key
,
421 const net::CompletionCallback
& callback
) {
422 const uint64 entry_hash
= simple_util::GetEntryHashKey(key
);
424 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
425 entries_pending_doom_
.find(entry_hash
);
426 if (it
!= entries_pending_doom_
.end()) {
// A doom is in flight for this hash: queue a retry of this DoomEntry call.
427 Callback
<int(const net::CompletionCallback
&)> operation
=
428 base::Bind(&SimpleBackendImpl::DoomEntry
, base::Unretained(this), key
);
429 it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
430 operation
, callback
));
431 return net::ERR_IO_PENDING
;
433 scoped_refptr
<SimpleEntryImpl
> simple_entry
=
434 CreateOrFindActiveEntry(entry_hash
, key
);
435 return simple_entry
->DoomEntry(callback
);
438 int SimpleBackendImpl::DoomAllEntries(const CompletionCallback
& callback
) {
439 return DoomEntriesBetween(Time(), Time(), callback
);
// Continuation of DoomEntriesBetween() once the index has loaded: bails out
// through |callback| on index-load failure, otherwise dooms every entry the
// index reports between |initial_time| and |end_time|.
442 void SimpleBackendImpl::IndexReadyForDoom(Time initial_time
,
444 const CompletionCallback
& callback
,
446 if (result
!= net::OK
) {
447 callback
.Run(result
);
450 scoped_ptr
<std::vector
<uint64
> > removed_key_hashes(
451 index_
->GetEntriesBetween(initial_time
, end_time
).release());
452 DoomEntries(removed_key_hashes
.get(), callback
);
// Dooms all entries in the given time range. The work is deferred until the
// index is ready; IndexReadyForDoom() then performs the actual doom. Returns
// the index's ExecuteWhenReady() result.
// NOTE(review): the |end_time| parameter line of this signature is not
// visible in this copy (it appears truncated); |end_time| is used below.
455 int SimpleBackendImpl::DoomEntriesBetween(
456 const Time initial_time
,
458 const CompletionCallback
& callback
) {
459 return index_
->ExecuteWhenReady(
460 base::Bind(&SimpleBackendImpl::IndexReadyForDoom
, AsWeakPtr(),
461 initial_time
, end_time
, callback
));
464 int SimpleBackendImpl::DoomEntriesSince(
465 const Time initial_time
,
466 const CompletionCallback
& callback
) {
467 return DoomEntriesBetween(initial_time
, Time(), callback
);
// Backend iterator over the entry hashes known to the index. Holds only a
// WeakPtr to the backend, so it may outlive it; a snapshot of all hashes is
// taken lazily on the first OpenNextEntry() call.
470 class SimpleBackendImpl::SimpleIterator final
: public Iterator
{
472 explicit SimpleIterator(base::WeakPtr
<SimpleBackendImpl
> backend
)
474 weak_factory_(this) {
477 // From Backend::Iterator:
// Defers the real work until the index is ready.
478 int OpenNextEntry(Entry
** next_entry
,
479 const CompletionCallback
& callback
) override
{
480 CompletionCallback open_next_entry_impl
=
481 base::Bind(&SimpleIterator::OpenNextEntryImpl
,
482 weak_factory_
.GetWeakPtr(), next_entry
, callback
);
483 return backend_
->index_
->ExecuteWhenReady(open_next_entry_impl
);
// Pops hashes off the snapshot until one that is still present in the index
// opens successfully, reporting ERR_FAILED when the snapshot is exhausted.
// NOTE(review): several lines of this method (including the guard around
// the first callback.Run and some call arguments) are not visible in this
// copy; confirm against upstream.
486 void OpenNextEntryImpl(Entry
** next_entry
,
487 const CompletionCallback
& callback
,
488 int index_initialization_error_code
) {
490 callback
.Run(net::ERR_FAILED
);
493 if (index_initialization_error_code
!= net::OK
) {
494 callback
.Run(index_initialization_error_code
);
497 if (!hashes_to_enumerate_
)
498 hashes_to_enumerate_
= backend_
->index()->GetAllHashes().Pass();
500 while (!hashes_to_enumerate_
->empty()) {
501 uint64 entry_hash
= hashes_to_enumerate_
->back();
502 hashes_to_enumerate_
->pop_back();
503 if (backend_
->index()->Has(entry_hash
)) {
505 CompletionCallback continue_iteration
= base::Bind(
506 &SimpleIterator::CheckIterationReturnValue
,
507 weak_factory_
.GetWeakPtr(),
510 int error_code_open
= backend_
->OpenEntryFromHash(entry_hash
,
513 if (error_code_open
== net::ERR_IO_PENDING
)
515 if (error_code_open
!= net::ERR_FAILED
) {
516 callback
.Run(error_code_open
);
521 callback
.Run(net::ERR_FAILED
);
// An ERR_FAILED open means the entry vanished between the index snapshot
// and the open; advance to the next hash instead of failing the iteration.
524 void CheckIterationReturnValue(Entry
** entry
,
525 const CompletionCallback
& callback
,
527 if (error_code
== net::ERR_FAILED
) {
528 OpenNextEntry(entry
, callback
);
531 callback
.Run(error_code
);
535 base::WeakPtr
<SimpleBackendImpl
> backend_
;
536 scoped_ptr
<std::vector
<uint64
> > hashes_to_enumerate_
;
537 base::WeakPtrFactory
<SimpleIterator
> weak_factory_
;
540 scoped_ptr
<Backend::Iterator
> SimpleBackendImpl::CreateIterator() {
541 return scoped_ptr
<Iterator
>(new SimpleIterator(AsWeakPtr()));
544 void SimpleBackendImpl::GetStats(
545 std::vector
<std::pair
<std::string
, std::string
> >* stats
) {
546 std::pair
<std::string
, std::string
> item
;
547 item
.first
= "Cache type";
548 item
.second
= "Simple Cache";
549 stats
->push_back(item
);
552 void SimpleBackendImpl::OnExternalCacheHit(const std::string
& key
) {
553 index_
->UseIfExists(simple_util::GetEntryHashKey(key
));
556 void SimpleBackendImpl::InitializeIndex(const CompletionCallback
& callback
,
557 const DiskStatResult
& result
) {
558 if (result
.net_error
== net::OK
) {
559 index_
->SetMaxSize(result
.max_size
);
560 index_
->Initialize(result
.cache_dir_mtime
);
562 callback
.Run(result
.net_error
);
// Runs on the worker pool. Verifies (or creates/upgrades) the on-disk cache
// structure, reads the cache directory mtime, and — when no size was
// suggested — picks a preferred max size from the free disk space.
565 SimpleBackendImpl::DiskStatResult
SimpleBackendImpl::InitCacheStructureOnDisk(
566 const base::FilePath
& path
,
567 uint64 suggested_max_size
) {
568 DiskStatResult result
;
569 result
.max_size
= suggested_max_size
;
570 result
.net_error
= net::OK
;
571 if (!FileStructureConsistent(path
)) {
572 LOG(ERROR
) << "Simple Cache Backend: wrong file structure on disk: "
573 << path
.LossyDisplayName();
574 result
.net_error
= net::ERR_FAILED
;
// NOTE(review): the declaration of |mtime_result| (presumably the return of
// GetMTime) is not visible in this copy; it appears truncated.
577 disk_cache::simple_util::GetMTime(path
, &result
.cache_dir_mtime
);
578 DCHECK(mtime_result
);
579 if (!result
.max_size
) {
580 int64 available
= base::SysInfo::AmountOfFreeDiskSpace(path
);
581 result
.max_size
= disk_cache::PreferredCacheSize(available
);
583 DCHECK(result
.max_size
);
// Returns the SimpleEntryImpl registered for |entry_hash|, creating a new
// one and inserting it into |active_entries_| when none exists. On a key
// mismatch with the active entry (a hash collision) the lookup is retried.
588 scoped_refptr
<SimpleEntryImpl
> SimpleBackendImpl::CreateOrFindActiveEntry(
589 const uint64 entry_hash
,
590 const std::string
& key
) {
591 DCHECK_EQ(entry_hash
, simple_util::GetEntryHashKey(key
));
// Insert a placeholder (NULL) slot; |did_insert| tells us whether this hash
// was already active.
592 std::pair
<EntryMap::iterator
, bool> insert_result
=
593 active_entries_
.insert(EntryMap::value_type(entry_hash
, NULL
))
;
594 EntryMap::iterator
& it
= insert_result
.first
;
595 const bool did_insert
= insert_result
.second
;
597 SimpleEntryImpl
* entry
= it
->second
=
598 new SimpleEntryImpl(cache_type_
, path_
, entry_hash
,
599 entry_operations_mode_
,this, net_log_
);
601 entry
->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash
, this));
604 // It's possible, but unlikely, that we have an entry hash collision with a
605 // currently active entry.
// NOTE(review): the collision-handling lines between the comparison and the
// DCHECK are not visible in this copy; confirm against upstream.
606 if (key
!= it
->second
->key()) {
608 DCHECK_EQ(0U, active_entries_
.count(entry_hash
));
609 return CreateOrFindActiveEntry(entry_hash
, key
);
611 return make_scoped_refptr(it
->second
);
// Opens an entry given only its hash (used by the iterator). Queues behind a
// pending doom of the same hash; reuses the keyed OpenEntry() path when the
// hash is already active; otherwise opens a fresh SimpleEntryImpl, with
// OnEntryOpenedFromHash() registering it on completion.
614 int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash
,
616 const CompletionCallback
& callback
) {
617 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
618 entries_pending_doom_
.find(entry_hash
);
619 if (it
!= entries_pending_doom_
.end()) {
// A doom is in flight for this hash: queue a retry of this call.
620 Callback
<int(const net::CompletionCallback
&)> operation
=
621 base::Bind(&SimpleBackendImpl::OpenEntryFromHash
,
622 base::Unretained(this), entry_hash
, entry
);
623 it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
624 operation
, callback
));
625 return net::ERR_IO_PENDING
;
628 EntryMap::iterator has_active
= active_entries_
.find(entry_hash
);
629 if (has_active
!= active_entries_
.end()) {
630 return OpenEntry(has_active
->second
->key(), entry
, callback
);
633 scoped_refptr
<SimpleEntryImpl
> simple_entry
= new SimpleEntryImpl(
634 cache_type_
, path_
, entry_hash
, entry_operations_mode_
, this, net_log_
);
635 CompletionCallback backend_callback
=
636 base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash
,
637 AsWeakPtr(), entry_hash
, entry
, simple_entry
, callback
);
638 return simple_entry
->OpenEntry(entry
, backend_callback
);
// Dooms the entry with |entry_hash| without knowing its key. Queues behind
// an in-flight doom of the same hash; delegates to the active entry when one
// exists; otherwise routes a single-element vector through DoomEntries().
// Always completes asynchronously (returns ERR_IO_PENDING).
641 int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash
,
642 const CompletionCallback
& callback
) {
// Scratch Entry* slot, owned by |scoped_entry| for the duration of the call.
643 Entry
** entry
= new Entry
*();
644 scoped_ptr
<Entry
*> scoped_entry(entry
);
646 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator pending_it
=
647 entries_pending_doom_
.find(entry_hash
);
648 if (pending_it
!= entries_pending_doom_
.end()) {
// Another doom of the same hash is in flight: queue a retry of this call.
649 Callback
<int(const net::CompletionCallback
&)> operation
=
650 base::Bind(&SimpleBackendImpl::DoomEntryFromHash
,
651 base::Unretained(this), entry_hash
);
652 pending_it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
653 operation
, callback
));
654 return net::ERR_IO_PENDING
;
657 EntryMap::iterator active_it
= active_entries_
.find(entry_hash
);
658 if (active_it
!= active_entries_
.end())
659 return active_it
->second
->DoomEntry(callback
);
661 // There's no pending dooms, nor any open entry. We can make a trivial
662 // call to DoomEntries() to delete this entry.
663 std::vector
<uint64
> entry_hash_vector
;
664 entry_hash_vector
.push_back(entry_hash
);
665 DoomEntries(&entry_hash_vector
, callback
);
666 return net::ERR_IO_PENDING
;
// Completion for OpenEntryFromHash(): on success, either registers the newly
// opened entry in |active_entries_| (when no other entry raced in for the
// same hash) or closes it and re-opens through the already-active entry.
// NOTE(review): the |hash|, |entry| and |error_code| parameter lines of this
// signature are not visible in this copy; they are used in the body below.
669 void SimpleBackendImpl::OnEntryOpenedFromHash(
672 const scoped_refptr
<SimpleEntryImpl
>& simple_entry
,
673 const CompletionCallback
& callback
,
675 if (error_code
!= net::OK
) {
676 callback
.Run(error_code
);
680 std::pair
<EntryMap::iterator
, bool> insert_result
=
681 active_entries_
.insert(EntryMap::value_type(hash
, simple_entry
.get()));
682 EntryMap::iterator
& it
= insert_result
.first
;
683 const bool did_insert
= insert_result
.second
;
685 // There was no active entry corresponding to this hash. We've already put
686 // the entry opened from hash in the |active_entries_|. We now provide the
687 // proxy object to the entry.
688 it
->second
->SetActiveEntryProxy(ActiveEntryProxy::Create(hash
, this));
689 callback
.Run(net::OK
);
691 // The entry was made active while we waiting for the open from hash to
692 // finish. The entry created from hash needs to be closed, and the one
693 // in |active_entries_| can be returned to the caller.
694 simple_entry
->Close();
695 it
->second
->OpenEntry(entry
, callback
);
// Completion for OpenEntry(): verifies that the opened entry's key really
// matches |key| (hash collisions are possible). On a mismatch the entry is
// doomed and closed and ERR_FAILED is reported. Also records the
// KeyMatchedOnOpen histogram on successful opens.
699 void SimpleBackendImpl::OnEntryOpenedFromKey(
700 const std::string key
,
702 const scoped_refptr
<SimpleEntryImpl
>& simple_entry
,
703 const CompletionCallback
& callback
,
705 int final_code
= error_code
;
706 if (final_code
== net::OK
) {
707 bool key_matches
= key
.compare(simple_entry
->key()) == 0;
// Mismatch branch: the guard line for this block is not visible in this
// copy of the file; it appears truncated.
709 // TODO(clamy): Add a unit test to check this code path.
710 DLOG(WARNING
) << "Key mismatch on open.";
711 simple_entry
->Doom();
712 simple_entry
->Close();
713 final_code
= net::ERR_FAILED
;
715 DCHECK_EQ(simple_entry
->entry_hash(), simple_util::GetEntryHashKey(key
));
717 SIMPLE_CACHE_UMA(BOOLEAN
, "KeyMatchedOnOpen", cache_type_
, key_matches
);
719 callback
.Run(final_code
);
// Completion for the mass doom posted by DoomEntries(): runs
// OnDoomComplete() for every doomed hash (which releases any operations
// queued behind each doom) and then forwards |result| to |callback|.
722 void SimpleBackendImpl::DoomEntriesComplete(
723 scoped_ptr
<std::vector
<uint64
> > entry_hashes
,
724 const net::CompletionCallback
& callback
,
727 entry_hashes
->begin(), entry_hashes
->end(),
728 std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete
),
730 callback
.Run(result
);
733 void SimpleBackendImpl::FlushWorkerPoolForTesting() {
734 if (g_sequenced_worker_pool
)
735 g_sequenced_worker_pool
->FlushForTesting();
738 } // namespace disk_cache