1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/simple/simple_backend_impl.h"
12 #include <sys/resource.h>
15 #include "base/bind.h"
16 #include "base/callback.h"
17 #include "base/file_util.h"
18 #include "base/location.h"
19 #include "base/message_loop/message_loop_proxy.h"
20 #include "base/metrics/field_trial.h"
21 #include "base/metrics/histogram.h"
22 #include "base/metrics/sparse_histogram.h"
23 #include "base/single_thread_task_runner.h"
24 #include "base/sys_info.h"
25 #include "base/task_runner_util.h"
26 #include "base/threading/sequenced_worker_pool.h"
27 #include "base/time/time.h"
28 #include "net/base/net_errors.h"
29 #include "net/disk_cache/cache_util.h"
30 #include "net/disk_cache/simple/simple_entry_format.h"
31 #include "net/disk_cache/simple/simple_entry_impl.h"
32 #include "net/disk_cache/simple/simple_histogram_macros.h"
33 #include "net/disk_cache/simple/simple_index.h"
34 #include "net/disk_cache/simple/simple_index_file.h"
35 #include "net/disk_cache/simple/simple_synchronous_entry.h"
36 #include "net/disk_cache/simple/simple_util.h"
37 #include "net/disk_cache/simple/simple_version_upgrade.h"
42 using base::MessageLoopProxy
;
43 using base::SequencedWorkerPool
;
44 using base::SingleThreadTaskRunner
;
46 using base::DirectoryExists
;
47 using base::CreateDirectory
;
49 namespace disk_cache
{
// NOTE(review): the embedded numbering jumps 49->53 here; an anonymous
// namespace opener (file-local scope for the constants below) appears to have
// been dropped by the extraction — verify against the upstream file.
53 // Maximum number of concurrent worker pool threads, which also is the limit
54 // on concurrent IO (as we use one thread per IO request).
55 const int kDefaultMaxWorkerThreads
= 50;
// Prefix used to name the worker-pool threads.
57 const char kThreadNamePrefix
[] = "SimpleCache";
59 // Maximum fraction of the cache that one entry can consume.
60 const int kMaxFileRatio
= 8;
62 // A global sequenced worker pool to use for launching all tasks.
// Lazily created by MaybeCreateSequencedWorkerPool() and intentionally leaked.
63 SequencedWorkerPool
* g_sequenced_worker_pool
= NULL
;
// Lazily creates the process-wide worker pool used for all SimpleCache IO.
// The thread count defaults to kDefaultMaxWorkerThreads and may be overridden
// by the "SimpleCacheMaxThreads" field trial. The pool is AddRef'd and never
// released so it outlives any backend (deliberate leak; see comment below).
// NOTE(review): lines are missing from this extraction (numbering jumps
// 67->69, 73->76, 76->78) — the assignment consuming the std::max(...) value
// and the second SequencedWorkerPool constructor argument (presumably
// kThreadNamePrefix) are not visible; confirm against upstream.
65 void MaybeCreateSequencedWorkerPool() {
66 if (!g_sequenced_worker_pool
) {
67 int max_worker_threads
= kDefaultMaxWorkerThreads
;
69 const std::string thread_count_field_trial
=
70 base::FieldTrialList::FindFullName("SimpleCacheMaxThreads");
71 if (!thread_count_field_trial
.empty()) {
// Clamp the trial-provided thread count to at least 1.
73 std::max(1, std::atoi(thread_count_field_trial
.c_str()));
76 g_sequenced_worker_pool
= new SequencedWorkerPool(max_worker_threads
,
78 g_sequenced_worker_pool
->AddRef(); // Leak it.
// Ensures the file-descriptor-limit histograms are recorded at most once per
// process.
82 bool g_fd_limit_histogram_has_been_populated
= false;
// Records the process soft/hard RLIMIT_NOFILE values (via getrlimit) to UMA,
// once per process. On platforms/paths where getrlimit fails, records the
// failure status instead.
// NOTE(review): extraction gaps here (86-87, 89, 94, 98-99, 105, 107-109,
// 118-119) hide the early return, the enum declaration header, and the
// else/closing braces — verify against upstream before editing.
84 void MaybeHistogramFdLimit(net::CacheType cache_type
) {
85 if (g_fd_limit_histogram_has_been_populated
)
88 // Used in histograms; add new entries at end.
90 FD_LIMIT_STATUS_UNSUPPORTED
= 0,
91 FD_LIMIT_STATUS_FAILED
= 1,
92 FD_LIMIT_STATUS_SUCCEEDED
= 2,
93 FD_LIMIT_STATUS_MAX
= 3
95 FdLimitStatus fd_limit_status
= FD_LIMIT_STATUS_UNSUPPORTED
;
96 int soft_fd_limit
= 0;
97 int hard_fd_limit
= 0;
// Query the fd limit; getrlimit returns 0 on success.
100 struct rlimit nofile
;
101 if (!getrlimit(RLIMIT_NOFILE
, &nofile
)) {
102 soft_fd_limit
= nofile
.rlim_cur
;
103 hard_fd_limit
= nofile
.rlim_max
;
104 fd_limit_status
= FD_LIMIT_STATUS_SUCCEEDED
;
106 fd_limit_status
= FD_LIMIT_STATUS_FAILED
;
// Record the status unconditionally; the limit values only on success.
110 SIMPLE_CACHE_UMA(ENUMERATION
,
111 "FileDescriptorLimitStatus", cache_type
,
112 fd_limit_status
, FD_LIMIT_STATUS_MAX
);
113 if (fd_limit_status
== FD_LIMIT_STATUS_SUCCEEDED
) {
114 SIMPLE_CACHE_UMA(SPARSE_SLOWLY
,
115 "FileDescriptorLimitSoft", cache_type
, soft_fd_limit
);
116 SIMPLE_CACHE_UMA(SPARSE_SLOWLY
,
117 "FileDescriptorLimitHard", cache_type
, hard_fd_limit
);
120 g_fd_limit_histogram_has_been_populated
= true;
123 // Detects if the files in the cache directory match the current disk cache
124 // backend type and version. If the directory contains no cache, occupies it
125 // with the fresh structure.
// Returns the result of UpgradeSimpleCacheOnDisk; an early failure return for
// the directory-creation error path is hidden by the extraction (numbering
// jumps 128->131) — verify against upstream.
126 bool FileStructureConsistent(const base::FilePath
& path
) {
127 if (!base::PathExists(path
) && !base::CreateDirectory(path
)) {
128 LOG(ERROR
) << "Failed to create directory: " << path
.LossyDisplayName();
131 return disk_cache::UpgradeSimpleCacheOnDisk(path
);
134 // A context used by a BarrierCompletionCallback to track state.
// NOTE(review): member declarations and the struct's closing brace (orig
// lines 138-145) are missing from this extraction; the visible fields used
// below are |expected|, |count| and |had_error|.
135 struct BarrierContext
{
136 BarrierContext(int expected
)
137 : expected(expected
),
// Shared implementation bound into each barrier callback: counts successful
// completions, forwards the first error immediately, and fires
// |final_callback| with net::OK once |expected| successes have arrived.
// (The |result| parameter declaration and the count increment are hidden by
// extraction gaps — verify against upstream.)
146 void BarrierCompletionCallbackImpl(
147 BarrierContext
* context
,
148 const net::CompletionCallback
& final_callback
,
150 DCHECK_GT(context
->expected
, context
->count
);
151 if (context
->had_error
)
153 if (result
!= net::OK
) {
154 context
->had_error
= true;
155 final_callback
.Run(result
);
159 if (context
->count
== context
->expected
)
160 final_callback
.Run(net::OK
);
163 // A barrier completion callback is a net::CompletionCallback that waits for
164 // |count| successful results before invoking |final_callback|. In the case of
165 // an error, the first error is passed to |final_callback| and all others
// The |count| parameter declaration (orig line 168) is hidden by the
// extraction. base::Owned transfers ownership of |context| to the returned
// callback.
167 net::CompletionCallback
MakeBarrierCompletionCallback(
169 const net::CompletionCallback
& final_callback
) {
170 BarrierContext
* context
= new BarrierContext(count
);
171 return base::Bind(&BarrierCompletionCallbackImpl
,
172 base::Owned(context
), final_callback
);
175 // A short bindable thunk that ensures a completion callback is always called
176 // after running an operation asynchronously.
// If |operation| completes synchronously (returns anything other than
// ERR_IO_PENDING), the callback is invoked here with that result.
177 void RunOperationAndCallback(
178 const Callback
<int(const net::CompletionCallback
&)>& operation
,
179 const net::CompletionCallback
& operation_callback
) {
180 const int operation_result
= operation
.Run(operation_callback
);
181 if (operation_result
!= net::ERR_IO_PENDING
)
182 operation_callback
.Run(operation_result
);
// Records time-to-index-load UMA, split by success/failure of the index load.
// (The |result| parameter and part of the TimeDelta expression are hidden by
// extraction gaps — verify against upstream.)
185 void RecordIndexLoad(net::CacheType cache_type
,
186 base::TimeTicks constructed_since
,
188 const base::TimeDelta creation_to_index
= base::TimeTicks::Now() -
190 if (result
== net::OK
) {
191 SIMPLE_CACHE_UMA(TIMES
, "CreationToIndex", cache_type
, creation_to_index
);
193 SIMPLE_CACHE_UMA(TIMES
,
194 "CreationToIndexFail", cache_type
, creation_to_index
);
// Constructor: stores configuration and picks the entry-operation mode —
// OPTIMISTIC_OPERATIONS for the HTTP disk cache, NON_OPTIMISTIC otherwise.
// Several initializer-list entries (e.g. path_, net_log_; orig lines 201,
// 205, 213) are hidden by extraction gaps — verify against upstream.
200 SimpleBackendImpl::SimpleBackendImpl(const FilePath
& path
,
202 net::CacheType cache_type
,
203 base::SingleThreadTaskRunner
* cache_thread
,
204 net::NetLog
* net_log
)
206 cache_type_(cache_type
),
207 cache_thread_(cache_thread
),
208 orig_max_size_(max_bytes
),
209 entry_operations_mode_(
210 cache_type
== net::DISK_CACHE
?
211 SimpleEntryImpl::OPTIMISTIC_OPERATIONS
:
212 SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS
),
214 MaybeHistogramFdLimit(cache_type_
);
// Destructor: flushes the in-memory index to disk.
217 SimpleBackendImpl::~SimpleBackendImpl() {
218 index_
->WriteToDisk();
// Asynchronously initializes the backend: obtains the shared worker pool,
// constructs the index, and posts InitCacheStructureOnDisk to the worker pool
// with InitializeIndex as the reply. Always returns ERR_IO_PENDING;
// |completion_callback| fires when initialization finishes.
// (The PostTaskAndReplyWithResult task-runner argument and part of the Bind
// argument lists — orig lines 235-236, 238 — are hidden by extraction gaps.)
221 int SimpleBackendImpl::Init(const CompletionCallback
& completion_callback
) {
222 MaybeCreateSequencedWorkerPool();
224 worker_pool_
= g_sequenced_worker_pool
->GetTaskRunnerWithShutdownBehavior(
225 SequencedWorkerPool::CONTINUE_ON_SHUTDOWN
);
227 index_
.reset(new SimpleIndex(MessageLoopProxy::current(), this, cache_type_
,
228 make_scoped_ptr(new SimpleIndexFile(
229 cache_thread_
.get(), worker_pool_
.get(),
230 cache_type_
, path_
))));
231 index_
->ExecuteWhenReady(
232 base::Bind(&RecordIndexLoad
, cache_type_
, base::TimeTicks::Now()));
234 PostTaskAndReplyWithResult(
237 base::Bind(&SimpleBackendImpl::InitCacheStructureOnDisk
, path_
,
239 base::Bind(&SimpleBackendImpl::InitializeIndex
, AsWeakPtr(),
240 completion_callback
));
241 return net::ERR_IO_PENDING
;
// Records the requested maximum cache size and forwards it to the index.
244 bool SimpleBackendImpl::SetMaxSize(int max_bytes
) {
245 orig_max_size_
= max_bytes
;
246 return index_
->SetMaxSize(max_bytes
);
// One entry may use at most 1/kMaxFileRatio of the total cache size.
249 int SimpleBackendImpl::GetMaxFileSize() const {
250 return index_
->max_size() / kMaxFileRatio
;
// Drops a no-longer-active entry from the active-entries map.
253 void SimpleBackendImpl::OnDeactivated(const SimpleEntryImpl
* entry
) {
254 active_entries_
.erase(entry
->entry_hash());
// Marks |entry_hash| as pending doom; operations arriving for it while the
// doom is in flight are queued in the associated Closure vector.
257 void SimpleBackendImpl::OnDoomStart(uint64 entry_hash
) {
258 // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
259 CHECK_EQ(0u, entries_pending_doom_
.count(entry_hash
));
260 entries_pending_doom_
.insert(
261 std::make_pair(entry_hash
, std::vector
<Closure
>()));
// Clears the pending-doom state for |entry_hash| and runs every operation
// that was queued while the doom was in flight. The closures are swapped out
// before the map erase so they run against a consistent map.
264 void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash
) {
265 // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
266 CHECK_EQ(1u, entries_pending_doom_
.count(entry_hash
));
267 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
268 entries_pending_doom_
.find(entry_hash
);
269 std::vector
<Closure
> to_run_closures
;
270 to_run_closures
.swap(it
->second
);
271 entries_pending_doom_
.erase(it
);
273 std::for_each(to_run_closures
.begin(), to_run_closures
.end(),
274 std::mem_fun_ref(&Closure::Run
));
// Dooms a set of entries. Hashes that are currently open or already pending
// doom are doomed one-by-one via DoomEntryFromHash; the remainder are deleted
// en masse on the worker pool via SimpleSynchronousEntry::DoomEntrySet. A
// barrier callback joins the individual dooms plus the mass doom before
// invoking |callback|.
// NOTE(review): several lines are hidden by extraction gaps (e.g. the swap-
// and-pop branch header around orig 299-303, the barrier's second argument at
// orig 310, the OnDoomStart loop body at orig 322-326, and the trailing
// barrier_callback argument after orig 336) — verify against upstream before
// modifying.
277 void SimpleBackendImpl::DoomEntries(std::vector
<uint64
>* entry_hashes
,
278 const net::CompletionCallback
& callback
) {
279 scoped_ptr
<std::vector
<uint64
> >
280 mass_doom_entry_hashes(new std::vector
<uint64
>());
281 mass_doom_entry_hashes
->swap(*entry_hashes
);
283 std::vector
<uint64
> to_doom_individually_hashes
;
285 // For each of the entry hashes, there are two cases:
286 // 1. The entry is either open or pending doom, and so it should be doomed
287 // individually to avoid flakes.
288 // 2. The entry is not in use at all, so we can call
289 // SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
// Iterate backwards so swap-and-pop removal does not disturb unvisited
// elements.
290 for (int i
= mass_doom_entry_hashes
->size() - 1; i
>= 0; --i
) {
291 const uint64 entry_hash
= (*mass_doom_entry_hashes
)[i
];
292 // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
293 CHECK(active_entries_
.count(entry_hash
) == 0 ||
294 entries_pending_doom_
.count(entry_hash
) == 0)
295 << "The entry 0x" << std::hex
<< entry_hash
296 << " is both active and pending doom.";
297 if (!active_entries_
.count(entry_hash
) &&
298 !entries_pending_doom_
.count(entry_hash
)) {
302 to_doom_individually_hashes
.push_back(entry_hash
);
// Swap-and-pop: replace the moved hash with the last element and shrink.
304 (*mass_doom_entry_hashes
)[i
] = mass_doom_entry_hashes
->back();
305 mass_doom_entry_hashes
->resize(mass_doom_entry_hashes
->size() - 1);
// Barrier waits for each individual doom plus one mass-doom completion.
308 net::CompletionCallback barrier_callback
=
309 MakeBarrierCompletionCallback(to_doom_individually_hashes
.size() + 1,
311 for (std::vector
<uint64
>::const_iterator
312 it
= to_doom_individually_hashes
.begin(),
313 end
= to_doom_individually_hashes
.end(); it
!= end
; ++it
) {
314 const int doom_result
= DoomEntryFromHash(*it
, barrier_callback
);
315 // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
316 CHECK_EQ(net::ERR_IO_PENDING
, doom_result
);
320 for (std::vector
<uint64
>::const_iterator it
= mass_doom_entry_hashes
->begin(),
321 end
= mass_doom_entry_hashes
->end();
327 // Taking this pointer here avoids undefined behaviour from calling
328 // base::Passed before mass_doom_entry_hashes.get().
329 std::vector
<uint64
>* mass_doom_entry_hashes_ptr
=
330 mass_doom_entry_hashes
.get();
331 PostTaskAndReplyWithResult(
332 worker_pool_
, FROM_HERE
,
333 base::Bind(&SimpleSynchronousEntry::DoomEntrySet
,
334 mass_doom_entry_hashes_ptr
, path_
),
335 base::Bind(&SimpleBackendImpl::DoomEntriesComplete
,
336 AsWeakPtr(), base::Passed(&mass_doom_entry_hashes
),
// Reports this backend's type. Note it returns DISK_CACHE unconditionally,
// regardless of cache_type_.
340 net::CacheType
SimpleBackendImpl::GetCacheType() const {
341 return net::DISK_CACHE
;
// Entry count as tracked by the in-memory index.
344 int32
SimpleBackendImpl::GetEntryCount() const {
345 // TODO(pasko): Use directory file count when index is not ready.
346 return index_
->GetEntryCount();
// Opens the entry for |key|. If a doom for the same hash is in flight, the
// open is queued to run after the doom completes (returns ERR_IO_PENDING);
// otherwise it is forwarded to the active (or newly created) SimpleEntryImpl.
// (The |entry| out-parameter declaration at orig 350 and part of the
// backend_callback Bind arguments at orig 370+ are hidden by extraction gaps.)
349 int SimpleBackendImpl::OpenEntry(const std::string
& key
,
351 const CompletionCallback
& callback
) {
352 const uint64 entry_hash
= simple_util::GetEntryHashKey(key
);
354 // TODO(gavinp): Factor out this (not quite completely) repetitive code
355 // block from OpenEntry/CreateEntry/DoomEntry.
356 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
357 entries_pending_doom_
.find(entry_hash
);
358 if (it
!= entries_pending_doom_
.end()) {
359 Callback
<int(const net::CompletionCallback
&)> operation
=
360 base::Bind(&SimpleBackendImpl::OpenEntry
,
361 base::Unretained(this), key
, entry
);
362 it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
363 operation
, callback
));
364 return net::ERR_IO_PENDING
;
366 scoped_refptr
<SimpleEntryImpl
> simple_entry
=
367 CreateOrFindActiveEntry(entry_hash
, key
);
368 CompletionCallback backend_callback
=
369 base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey
,
375 return simple_entry
->OpenEntry(entry
, backend_callback
);
// Creates the entry for |key|, deferring behind any in-flight doom for the
// same hash (same queueing pattern as OpenEntry). The |entry| out-parameter
// declaration at orig 379 is hidden by an extraction gap.
378 int SimpleBackendImpl::CreateEntry(const std::string
& key
,
380 const CompletionCallback
& callback
) {
381 DCHECK_LT(0u, key
.size());
382 const uint64 entry_hash
= simple_util::GetEntryHashKey(key
);
384 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
385 entries_pending_doom_
.find(entry_hash
);
386 if (it
!= entries_pending_doom_
.end()) {
387 Callback
<int(const net::CompletionCallback
&)> operation
=
388 base::Bind(&SimpleBackendImpl::CreateEntry
,
389 base::Unretained(this), key
, entry
);
390 it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
391 operation
, callback
));
392 return net::ERR_IO_PENDING
;
394 scoped_refptr
<SimpleEntryImpl
> simple_entry
=
395 CreateOrFindActiveEntry(entry_hash
, key
);
396 return simple_entry
->CreateEntry(entry
, callback
);
// Dooms the entry for |key|, again deferring behind an in-flight doom of the
// same hash before delegating to the SimpleEntryImpl.
399 int SimpleBackendImpl::DoomEntry(const std::string
& key
,
400 const net::CompletionCallback
& callback
) {
401 const uint64 entry_hash
= simple_util::GetEntryHashKey(key
);
403 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
404 entries_pending_doom_
.find(entry_hash
);
405 if (it
!= entries_pending_doom_
.end()) {
406 Callback
<int(const net::CompletionCallback
&)> operation
=
407 base::Bind(&SimpleBackendImpl::DoomEntry
, base::Unretained(this), key
);
408 it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
409 operation
, callback
));
410 return net::ERR_IO_PENDING
;
412 scoped_refptr
<SimpleEntryImpl
> simple_entry
=
413 CreateOrFindActiveEntry(entry_hash
, key
);
414 return simple_entry
->DoomEntry(callback
);
// Doom-all is expressed as dooming the (unbounded) time range [Time(), Time()).
417 int SimpleBackendImpl::DoomAllEntries(const CompletionCallback
& callback
) {
418 return DoomEntriesBetween(Time(), Time(), callback
);
// Runs once the index is loaded: collects the hashes in [initial_time,
// end_time) and dooms them. Forwards index-load errors straight to |callback|.
// (The |end_time| parameter at orig 422 and the |result| parameter at orig
// 424 are hidden by extraction gaps.)
421 void SimpleBackendImpl::IndexReadyForDoom(Time initial_time
,
423 const CompletionCallback
& callback
,
425 if (result
!= net::OK
) {
426 callback
.Run(result
);
429 scoped_ptr
<std::vector
<uint64
> > removed_key_hashes(
430 index_
->GetEntriesBetween(initial_time
, end_time
).release());
431 DoomEntries(removed_key_hashes
.get(), callback
);
// Defers the range doom until the index is ready.
434 int SimpleBackendImpl::DoomEntriesBetween(
435 const Time initial_time
,
437 const CompletionCallback
& callback
) {
438 return index_
->ExecuteWhenReady(
439 base::Bind(&SimpleBackendImpl::IndexReadyForDoom
, AsWeakPtr(),
440 initial_time
, end_time
, callback
));
// "Since" is the half-open range starting at |initial_time| with no end.
443 int SimpleBackendImpl::DoomEntriesSince(
444 const Time initial_time
,
445 const CompletionCallback
& callback
) {
446 return DoomEntriesBetween(initial_time
, Time(), callback
);
// Enumeration: defers GetNextEntryInIterator until the index is ready.
// (The |next_entry| out-parameter declaration at orig 450 is hidden by an
// extraction gap.)
449 int SimpleBackendImpl::OpenNextEntry(void** iter
,
451 const CompletionCallback
& callback
) {
452 CompletionCallback get_next_entry
=
453 base::Bind(&SimpleBackendImpl::GetNextEntryInIterator
, AsWeakPtr(), iter
,
454 next_entry
, callback
);
455 return index_
->ExecuteWhenReady(get_next_entry
);
// Releases the iterator state (|*iter| holds a heap-allocated
// SimpleIndex::HashList). The delete/reset lines (orig 461-462) are hidden by
// an extraction gap — verify against upstream.
458 void SimpleBackendImpl::EndEnumeration(void** iter
) {
459 SimpleIndex::HashList
* entry_list
=
460 static_cast<SimpleIndex::HashList
*>(*iter
);
// Minimal stats support: reports only the cache type.
465 void SimpleBackendImpl::GetStats(
466 std::vector
<std::pair
<std::string
, std::string
> >* stats
) {
467 std::pair
<std::string
, std::string
> item
;
468 item
.first
= "Cache type";
469 item
.second
= "Simple Cache";
470 stats
->push_back(item
);
// Refreshes the entry's recency in the index on an external cache hit.
473 void SimpleBackendImpl::OnExternalCacheHit(const std::string
& key
) {
474 index_
->UseIfExists(simple_util::GetEntryHashKey(key
));
// Reply half of Init(): on success, applies the computed max size and starts
// index initialization with the cache directory mtime; always reports
// |result.net_error| to |callback|.
477 void SimpleBackendImpl::InitializeIndex(const CompletionCallback
& callback
,
478 const DiskStatResult
& result
) {
479 if (result
.net_error
== net::OK
) {
480 index_
->SetMaxSize(result
.max_size
);
481 index_
->Initialize(result
.cache_dir_mtime
);
483 callback
.Run(result
.net_error
);
// Worker-pool half of Init(): validates/creates the on-disk structure, reads
// the directory mtime, and computes the max size (PreferredCacheSize of free
// disk space when no size was suggested). Runs on the worker pool — must not
// touch backend state.
// (The else-branch header and the `bool mtime_result =` prefix around orig
// 496-497, plus closing braces, are hidden by extraction gaps.)
486 SimpleBackendImpl::DiskStatResult
SimpleBackendImpl::InitCacheStructureOnDisk(
487 const base::FilePath
& path
,
488 uint64 suggested_max_size
) {
489 DiskStatResult result
;
490 result
.max_size
= suggested_max_size
;
491 result
.net_error
= net::OK
;
492 if (!FileStructureConsistent(path
)) {
493 LOG(ERROR
) << "Simple Cache Backend: wrong file structure on disk: "
494 << path
.LossyDisplayName();
495 result
.net_error
= net::ERR_FAILED
;
498 disk_cache::simple_util::GetMTime(path
, &result
.cache_dir_mtime
);
499 DCHECK(mtime_result
);
500 if (!result
.max_size
) {
501 int64 available
= base::SysInfo::AmountOfFreeDiskSpace(path
);
502 result
.max_size
= disk_cache::PreferredCacheSize(available
);
504 DCHECK(result
.max_size
);
// Returns the active SimpleEntryImpl for |entry_hash|, creating and
// registering one (as a WeakPtr in active_entries_) if none is alive. On a
// hash collision with a different key, the existing entry is evicted (the
// Doom call around orig 529 is hidden by an extraction gap) and the lookup
// retried recursively.
509 scoped_refptr
<SimpleEntryImpl
> SimpleBackendImpl::CreateOrFindActiveEntry(
510 const uint64 entry_hash
,
511 const std::string
& key
) {
512 DCHECK_EQ(entry_hash
, simple_util::GetEntryHashKey(key
));
// Insert a placeholder WeakPtr; if one already exists insert() is a no-op and
// returns the existing slot.
513 std::pair
<EntryMap::iterator
, bool> insert_result
=
514 active_entries_
.insert(std::make_pair(entry_hash
,
515 base::WeakPtr
<SimpleEntryImpl
>()));
516 EntryMap::iterator
& it
= insert_result
.first
;
517 if (insert_result
.second
)
518 DCHECK(!it
->second
.get());
// A dead WeakPtr (entry deactivated) is treated the same as a fresh insert.
519 if (!it
->second
.get()) {
520 SimpleEntryImpl
* entry
= new SimpleEntryImpl(
521 cache_type_
, path_
, entry_hash
, entry_operations_mode_
, this, net_log_
);
523 it
->second
= entry
->AsWeakPtr();
525 DCHECK(it
->second
.get());
526 // It's possible, but unlikely, that we have an entry hash collision with a
527 // currently active entry.
528 if (key
!= it
->second
->key()) {
530 DCHECK_EQ(0U, active_entries_
.count(entry_hash
));
531 return CreateOrFindActiveEntry(entry_hash
, key
);
533 return make_scoped_refptr(it
->second
.get());
// Opens an entry identified only by its hash (used by enumeration and mass
// doom). Defers behind an in-flight doom of the same hash; reuses an active
// entry's key-based OpenEntry when possible; otherwise opens speculatively
// and reconciles in OnEntryOpenedFromHash. (The |entry| out-parameter
// declaration at orig 537 is hidden by an extraction gap.)
536 int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash
,
538 const CompletionCallback
& callback
) {
539 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator it
=
540 entries_pending_doom_
.find(entry_hash
);
541 if (it
!= entries_pending_doom_
.end()) {
542 Callback
<int(const net::CompletionCallback
&)> operation
=
543 base::Bind(&SimpleBackendImpl::OpenEntryFromHash
,
544 base::Unretained(this), entry_hash
, entry
);
545 it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
546 operation
, callback
));
547 return net::ERR_IO_PENDING
;
// An already-active entry knows its key, so go through the normal open path.
550 EntryMap::iterator has_active
= active_entries_
.find(entry_hash
);
551 if (has_active
!= active_entries_
.end()) {
552 return OpenEntry(has_active
->second
->key(), entry
, callback
);
// No active entry: open speculatively; OnEntryOpenedFromHash registers it
// (or resolves a race) once the key is known.
555 scoped_refptr
<SimpleEntryImpl
> simple_entry
= new SimpleEntryImpl(
556 cache_type_
, path_
, entry_hash
, entry_operations_mode_
, this, net_log_
);
557 CompletionCallback backend_callback
=
558 base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash
,
559 AsWeakPtr(), entry_hash
, entry
, simple_entry
, callback
);
560 return simple_entry
->OpenEntry(entry
, backend_callback
);
// Dooms the entry identified by |entry_hash|. Three paths: queue behind an
// in-flight doom; delegate to an active entry's DoomEntry; or, when the entry
// is entirely unused, doom it via a single-element DoomEntries() call.
// Always asynchronous (returns ERR_IO_PENDING).
563 int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash
,
564 const CompletionCallback
& callback
) {
// Scratch Entry* slot owned by |scoped_entry|; its use after the visible
// lines (orig 567) is hidden by an extraction gap — verify against upstream.
565 Entry
** entry
= new Entry
*();
566 scoped_ptr
<Entry
*> scoped_entry(entry
);
568 base::hash_map
<uint64
, std::vector
<Closure
> >::iterator pending_it
=
569 entries_pending_doom_
.find(entry_hash
);
570 if (pending_it
!= entries_pending_doom_
.end()) {
571 Callback
<int(const net::CompletionCallback
&)> operation
=
572 base::Bind(&SimpleBackendImpl::DoomEntryFromHash
,
573 base::Unretained(this), entry_hash
);
574 pending_it
->second
.push_back(base::Bind(&RunOperationAndCallback
,
575 operation
, callback
));
576 return net::ERR_IO_PENDING
;
579 EntryMap::iterator active_it
= active_entries_
.find(entry_hash
);
580 if (active_it
!= active_entries_
.end())
581 return active_it
->second
->DoomEntry(callback
);
583 // There's no pending dooms, nor any open entry. We can make a trivial
584 // call to DoomEntries() to delete this entry.
585 std::vector
<uint64
> entry_hash_vector
;
586 entry_hash_vector
.push_back(entry_hash
);
587 DoomEntries(&entry_hash_vector
, callback
);
588 return net::ERR_IO_PENDING
;
// Enumeration step, run once the index is ready. On first use (|*iter| NULL —
// the check at orig 598-600 is hidden by an extraction gap) it snapshots all
// hashes from the index; then it pops hashes until one still present in the
// index opens successfully (ERR_IO_PENDING continues via
// CheckIterationReturnValue), a hard error is reported, or the list is
// exhausted (ERR_FAILED = end of enumeration).
// (The iter/next_entry parameters at orig 592-593, the |error_code| parameter
// at orig 595, and several Bind/call arguments are hidden by extraction gaps.)
591 void SimpleBackendImpl::GetNextEntryInIterator(
594 const CompletionCallback
& callback
,
596 if (error_code
!= net::OK
) {
597 callback
.Run(error_code
);
601 *iter
= index()->GetAllHashes().release();
603 SimpleIndex::HashList
* entry_list
=
604 static_cast<SimpleIndex::HashList
*>(*iter
);
605 while (entry_list
->size() > 0) {
606 uint64 entry_hash
= entry_list
->back();
607 entry_list
->pop_back();
608 if (index()->Has(entry_hash
)) {
610 CompletionCallback continue_iteration
= base::Bind(
611 &SimpleBackendImpl::CheckIterationReturnValue
,
616 int error_code_open
= OpenEntryFromHash(entry_hash
,
619 if (error_code_open
== net::ERR_IO_PENDING
)
621 if (error_code_open
!= net::ERR_FAILED
) {
622 callback
.Run(error_code_open
);
627 callback
.Run(net::ERR_FAILED
);
// Completion of a speculative open-by-hash: on success, either registers the
// new entry in active_entries_ (fresh insert) or — if another entry won the
// race for this hash — closes the speculative one and re-opens through the
// winner. (The hash/entry parameters at orig 631-632, the |error_code|
// parameter at orig 635, and the if/else structure around orig 646/651 are
// hidden by extraction gaps.)
630 void SimpleBackendImpl::OnEntryOpenedFromHash(
633 scoped_refptr
<SimpleEntryImpl
> simple_entry
,
634 const CompletionCallback
& callback
,
636 if (error_code
!= net::OK
) {
637 callback
.Run(error_code
);
641 std::pair
<EntryMap::iterator
, bool> insert_result
=
642 active_entries_
.insert(std::make_pair(hash
,
643 base::WeakPtr
<SimpleEntryImpl
>()));
644 EntryMap::iterator
& it
= insert_result
.first
;
645 const bool did_insert
= insert_result
.second
;
647 // There is no active entry corresponding to this hash. The entry created
648 // is put in the map of active entries and returned to the caller.
649 it
->second
= simple_entry
->AsWeakPtr();
650 callback
.Run(error_code
);
652 // The entry was made active with the key while the creation from hash
653 // occurred. The entry created from hash needs to be closed, and the one
654 // coming from the key returned to the caller.
655 simple_entry
->Close();
656 it
->second
->OpenEntry(entry
, callback
);
// Completion of a key-based open: verifies the opened entry's key actually
// matches the requested key (hash collisions make a mismatch possible); on
// mismatch dooms and closes the entry and reports ERR_FAILED. Records the
// match/mismatch to UMA. (The |error_code| parameter at orig 665 and the
// mismatch-branch header around orig 669 are hidden by extraction gaps.)
660 void SimpleBackendImpl::OnEntryOpenedFromKey(
661 const std::string key
,
663 scoped_refptr
<SimpleEntryImpl
> simple_entry
,
664 const CompletionCallback
& callback
,
666 int final_code
= error_code
;
667 if (final_code
== net::OK
) {
668 bool key_matches
= key
.compare(simple_entry
->key()) == 0;
670 // TODO(clamy): Add a unit test to check this code path.
671 DLOG(WARNING
) << "Key mismatch on open.";
672 simple_entry
->Doom();
673 simple_entry
->Close();
674 final_code
= net::ERR_FAILED
;
676 DCHECK_EQ(simple_entry
->entry_hash(), simple_util::GetEntryHashKey(key
));
678 SIMPLE_CACHE_UMA(BOOLEAN
, "KeyMatchedOnOpen", cache_type_
, key_matches
);
680 callback
.Run(final_code
);
// Enumeration continuation: ERR_FAILED from an open-by-hash means the entry
// vanished mid-iteration, so advance to the next one; any other code is the
// final result. (The iter/entry parameters at orig 684-685 and the
// |error_code| parameter at orig 687 are hidden by extraction gaps.)
683 void SimpleBackendImpl::CheckIterationReturnValue(
686 const CompletionCallback
& callback
,
688 if (error_code
== net::ERR_FAILED
) {
689 OpenNextEntry(iter
, entry
, callback
);
692 callback
.Run(error_code
);
// Reply for the mass doom posted by DoomEntries(): runs OnDoomComplete for
// every doomed hash (releasing queued operations), then reports |result|.
// (The std::for_each header at orig 699 and the bind1st second argument are
// hidden by extraction gaps.)
695 void SimpleBackendImpl::DoomEntriesComplete(
696 scoped_ptr
<std::vector
<uint64
> > entry_hashes
,
697 const net::CompletionCallback
& callback
,
700 entry_hashes
->begin(), entry_hashes
->end(),
701 std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete
),
703 callback
.Run(result
);
// Test hook: drains the shared worker pool so posted IO completes.
706 void SimpleBackendImpl::FlushWorkerPoolForTesting() {
707 if (g_sequenced_worker_pool
)
708 g_sequenced_worker_pool
->FlushForTesting();
711 } // namespace disk_cache