Update V8 to version 4.7.53.
[chromium-blink-merge.git] / net / disk_cache / simple / simple_backend_impl.cc
blob5d1730f883f2cf5db12c8dd77ab8ab5352f17934
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/simple/simple_backend_impl.h"
7 #include <algorithm>
8 #include <cstdlib>
9 #include <functional>
11 #if defined(OS_POSIX)
12 #include <sys/resource.h>
13 #endif
15 #include "base/bind.h"
16 #include "base/callback.h"
17 #include "base/files/file_util.h"
18 #include "base/lazy_instance.h"
19 #include "base/location.h"
20 #include "base/metrics/field_trial.h"
21 #include "base/metrics/histogram_macros.h"
22 #include "base/metrics/sparse_histogram.h"
23 #include "base/single_thread_task_runner.h"
24 #include "base/sys_info.h"
25 #include "base/task_runner_util.h"
26 #include "base/thread_task_runner_handle.h"
27 #include "base/threading/sequenced_worker_pool.h"
28 #include "base/time/time.h"
29 #include "net/base/net_errors.h"
30 #include "net/disk_cache/cache_util.h"
31 #include "net/disk_cache/simple/simple_entry_format.h"
32 #include "net/disk_cache/simple/simple_entry_impl.h"
33 #include "net/disk_cache/simple/simple_histogram_macros.h"
34 #include "net/disk_cache/simple/simple_index.h"
35 #include "net/disk_cache/simple/simple_index_file.h"
36 #include "net/disk_cache/simple/simple_synchronous_entry.h"
37 #include "net/disk_cache/simple/simple_util.h"
38 #include "net/disk_cache/simple/simple_version_upgrade.h"
40 using base::Callback;
41 using base::Closure;
42 using base::FilePath;
43 using base::SequencedWorkerPool;
44 using base::Time;
45 using base::DirectoryExists;
46 using base::CreateDirectory;
namespace disk_cache {

namespace {

// Maximum number of concurrent worker pool threads, which also is the limit
// on concurrent IO (as we use one thread per IO request).
const size_t kMaxWorkerThreads = 5U;

// Name prefix for the threads of the shared SimpleCache worker pool.
const char kThreadNamePrefix[] = "SimpleCache";

// Maximum fraction of the cache that one entry can consume.
const int kMaxFileRatio = 8;
// Owns the process-wide worker pool shared by all SimpleCache backends.
// Instantiated as a leaky LazyInstance so the pool is never destroyed and
// shutdown-time cache IO cannot race pool teardown.
class LeakySequencedWorkerPool {
 public:
  LeakySequencedWorkerPool()
      : sequenced_worker_pool_(
            new SequencedWorkerPool(kMaxWorkerThreads, kThreadNamePrefix)) {}

  // Blocks until all pending pool tasks have run. Test-only.
  void FlushForTesting() { sequenced_worker_pool_->FlushForTesting(); }

  // Returns a runner whose tasks may be abandoned at shutdown
  // (CONTINUE_ON_SHUTDOWN): cache IO must not block process exit.
  scoped_refptr<base::TaskRunner> GetTaskRunner() {
    return sequenced_worker_pool_->GetTaskRunnerWithShutdownBehavior(
        SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);
  }

 private:
  scoped_refptr<SequencedWorkerPool> sequenced_worker_pool_;

  DISALLOW_COPY_AND_ASSIGN(LeakySequencedWorkerPool);
};
// Process-wide worker pool; intentionally leaked (see
// LeakySequencedWorkerPool above).
base::LazyInstance<LeakySequencedWorkerPool>::Leaky g_sequenced_worker_pool =
    LAZY_INSTANCE_INITIALIZER;

// Guards MaybeHistogramFdLimit() so its histograms are recorded at most once
// per process.
bool g_fd_limit_histogram_has_been_populated = false;
// Records the process file-descriptor soft/hard limits to UMA, once per
// process. On non-POSIX platforms only the "unsupported" status is recorded.
void MaybeHistogramFdLimit(net::CacheType cache_type) {
  if (g_fd_limit_histogram_has_been_populated)
    return;

  // Used in histograms; add new entries at end.
  enum FdLimitStatus {
    FD_LIMIT_STATUS_UNSUPPORTED = 0,
    FD_LIMIT_STATUS_FAILED = 1,
    FD_LIMIT_STATUS_SUCCEEDED = 2,
    FD_LIMIT_STATUS_MAX = 3
  };
  FdLimitStatus fd_limit_status = FD_LIMIT_STATUS_UNSUPPORTED;
  int soft_fd_limit = 0;
  int hard_fd_limit = 0;

#if defined(OS_POSIX)
  struct rlimit nofile;
  if (!getrlimit(RLIMIT_NOFILE, &nofile)) {
    // NOTE(review): rlim_cur/rlim_max are rlim_t and may exceed int; values
    // are truncated here — presumably acceptable for histogram purposes.
    soft_fd_limit = nofile.rlim_cur;
    hard_fd_limit = nofile.rlim_max;
    fd_limit_status = FD_LIMIT_STATUS_SUCCEEDED;
  } else {
    fd_limit_status = FD_LIMIT_STATUS_FAILED;
  }
#endif

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "FileDescriptorLimitStatus", cache_type,
                   fd_limit_status, FD_LIMIT_STATUS_MAX);
  if (fd_limit_status == FD_LIMIT_STATUS_SUCCEEDED) {
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitSoft", cache_type, soft_fd_limit);
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitHard", cache_type, hard_fd_limit);
  }

  g_fd_limit_histogram_has_been_populated = true;
}
124 // Detects if the files in the cache directory match the current disk cache
125 // backend type and version. If the directory contains no cache, occupies it
126 // with the fresh structure.
127 bool FileStructureConsistent(const base::FilePath& path) {
128 if (!base::PathExists(path) && !base::CreateDirectory(path)) {
129 LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
130 return false;
132 return disk_cache::UpgradeSimpleCacheOnDisk(path);
// A context used by a BarrierCompletionCallback to track state.
struct BarrierContext {
  // |expected| is the number of successful results required before the
  // barrier fires its final callback.
  explicit BarrierContext(int expected)  // explicit: prevent implicit int conversion
      : expected(expected),
        count(0),
        had_error(false) {}

  const int expected;  // Successful results required in total.
  int count;           // Successful results seen so far.
  bool had_error;      // First error already forwarded; ignore later results.
};
// Shared implementation behind MakeBarrierCompletionCallback(): invoked once
// per sub-operation result. Forwards the first error immediately; otherwise
// fires |final_callback| with net::OK once |context->expected| successes
// have been counted.
void BarrierCompletionCallbackImpl(
    BarrierContext* context,
    const net::CompletionCallback& final_callback,
    int result) {
  DCHECK_GT(context->expected, context->count);
  if (context->had_error)
    return;  // An error was already forwarded; ignore trailing results.
  if (result != net::OK) {
    context->had_error = true;
    final_callback.Run(result);
    return;
  }
  ++context->count;
  if (context->count == context->expected)
    final_callback.Run(net::OK);
}
// A barrier completion callback is a net::CompletionCallback that waits for
// |count| successful results before invoking |final_callback|. In the case of
// an error, the first error is passed to |final_callback| and all others
// are ignored.
net::CompletionCallback MakeBarrierCompletionCallback(
    int count,
    const net::CompletionCallback& final_callback) {
  BarrierContext* context = new BarrierContext(count);
  // base::Owned() ties |context|'s lifetime to the returned callback object.
  return base::Bind(&BarrierCompletionCallbackImpl,
                    base::Owned(context), final_callback);
}
// A short bindable thunk that ensures a completion callback is always called
// after running an operation asynchronously.
void RunOperationAndCallback(
    const Callback<int(const net::CompletionCallback&)>& operation,
    const net::CompletionCallback& operation_callback) {
  const int operation_result = operation.Run(operation_callback);
  // A synchronous result means the operation will not invoke the callback
  // itself, so run it here.
  if (operation_result != net::ERR_IO_PENDING)
    operation_callback.Run(operation_result);
}
// Records to UMA how long the index took to load, measured from backend
// construction (|constructed_since|), split by load success/failure.
void RecordIndexLoad(net::CacheType cache_type,
                     base::TimeTicks constructed_since,
                     int result) {
  const base::TimeDelta creation_to_index = base::TimeTicks::Now() -
                                            constructed_since;
  if (result == net::OK) {
    SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index);
  } else {
    SIMPLE_CACHE_UMA(TIMES,
                     "CreationToIndexFail", cache_type, creation_to_index);
  }
}

}  // namespace
201 class SimpleBackendImpl::ActiveEntryProxy
202 : public SimpleEntryImpl::ActiveEntryProxy {
203 public:
204 ~ActiveEntryProxy() override {
205 if (backend_) {
206 DCHECK_EQ(1U, backend_->active_entries_.count(entry_hash_));
207 backend_->active_entries_.erase(entry_hash_);
211 static scoped_ptr<SimpleEntryImpl::ActiveEntryProxy> Create(
212 int64 entry_hash,
213 SimpleBackendImpl* backend) {
214 scoped_ptr<SimpleEntryImpl::ActiveEntryProxy>
215 proxy(new ActiveEntryProxy(entry_hash, backend));
216 return proxy.Pass();
219 private:
220 ActiveEntryProxy(uint64 entry_hash,
221 SimpleBackendImpl* backend)
222 : entry_hash_(entry_hash),
223 backend_(backend->AsWeakPtr()) {}
225 uint64 entry_hash_;
226 base::WeakPtr<SimpleBackendImpl> backend_;
// |max_bytes| of 0 lets InitCacheStructureOnDisk() pick a size from free
// disk space. Only the HTTP disk cache (net::DISK_CACHE) opts into
// optimistic entry operations.
SimpleBackendImpl::SimpleBackendImpl(
    const FilePath& path,
    int max_bytes,
    net::CacheType cache_type,
    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
    net::NetLog* net_log)
    : path_(path),
      cache_type_(cache_type),
      cache_thread_(cache_thread),
      orig_max_size_(max_bytes),
      entry_operations_mode_(cache_type == net::DISK_CACHE ?
                                 SimpleEntryImpl::OPTIMISTIC_OPERATIONS :
                                 SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS),
      net_log_(net_log) {
  // One-shot, process-wide histogram of the FD limits.
  MaybeHistogramFdLimit(cache_type_);
}
246 SimpleBackendImpl::~SimpleBackendImpl() {
247 index_->WriteToDisk();
// Starts asynchronous initialization: creates the index, begins loading it
// via the worker pool, and checks/creates the on-disk structure on
// |cache_thread_|. Always returns net::ERR_IO_PENDING; the final result
// reaches |completion_callback| through InitializeIndex().
int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
  worker_pool_ = g_sequenced_worker_pool.Get().GetTaskRunner();

  index_.reset(new SimpleIndex(
      base::ThreadTaskRunnerHandle::Get(),
      this,
      cache_type_,
      make_scoped_ptr(new SimpleIndexFile(
          cache_thread_, worker_pool_.get(), cache_type_, path_))));
  // Histogram how long the index load takes, counted from construction.
  index_->ExecuteWhenReady(
      base::Bind(&RecordIndexLoad, cache_type_, base::TimeTicks::Now()));

  PostTaskAndReplyWithResult(
      cache_thread_.get(),
      FROM_HERE,
      base::Bind(
          &SimpleBackendImpl::InitCacheStructureOnDisk, path_, orig_max_size_),
      base::Bind(&SimpleBackendImpl::InitializeIndex,
                 AsWeakPtr(),
                 completion_callback));
  return net::ERR_IO_PENDING;
}
// Updates the cache's size budget. Negative sizes are rejected (returns
// false); the new size is also pushed into the index, which enforces it.
bool SimpleBackendImpl::SetMaxSize(int max_bytes) {
  if (max_bytes < 0)
    return false;
  orig_max_size_ = max_bytes;
  index_->SetMaxSize(max_bytes);
  return true;
}
// The largest size a single entry may reach: a fixed fraction
// (1/kMaxFileRatio) of the total cache budget.
int SimpleBackendImpl::GetMaxFileSize() const {
  return static_cast<int>(index_->max_size() / kMaxFileRatio);
}
285 void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) {
286 DCHECK_EQ(0u, entries_pending_doom_.count(entry_hash));
287 entries_pending_doom_.insert(
288 std::make_pair(entry_hash, std::vector<Closure>()));
291 void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash) {
292 DCHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
293 base::hash_map<uint64, std::vector<Closure> >::iterator it =
294 entries_pending_doom_.find(entry_hash);
295 std::vector<Closure> to_run_closures;
296 to_run_closures.swap(it->second);
297 entries_pending_doom_.erase(it);
299 std::for_each(to_run_closures.begin(), to_run_closures.end(),
300 std::mem_fun_ref(&Closure::Run));
// Dooms every hash in |entry_hashes| (which is emptied). In-use hashes are
// doomed one at a time through their entries; the rest have their files
// deleted en masse on the worker pool. |callback| fires once everything —
// including the mass deletion — has completed, via a barrier callback.
void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
                                    const net::CompletionCallback& callback) {
  scoped_ptr<std::vector<uint64> >
      mass_doom_entry_hashes(new std::vector<uint64>());
  mass_doom_entry_hashes->swap(*entry_hashes);

  std::vector<uint64> to_doom_individually_hashes;

  // For each of the entry hashes, there are two cases:
  // 1. The entry is either open or pending doom, and so it should be doomed
  //    individually to avoid flakes.
  // 2. The entry is not in use at all, so we can call
  //    SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
  for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
    const uint64 entry_hash = (*mass_doom_entry_hashes)[i];
    DCHECK(active_entries_.count(entry_hash) == 0 ||
           entries_pending_doom_.count(entry_hash) == 0);
    if (!active_entries_.count(entry_hash) &&
        !entries_pending_doom_.count(entry_hash)) {
      continue;
    }

    to_doom_individually_hashes.push_back(entry_hash);

    // Swap-with-back removal: O(1), valid because we iterate backwards.
    (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
    mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
  }

  // The "+ 1" slot is for the mass DoomEntrySet result, delivered through
  // DoomEntriesComplete() below.
  net::CompletionCallback barrier_callback =
      MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
                                    callback);
  for (std::vector<uint64>::const_iterator
           it = to_doom_individually_hashes.begin(),
           end = to_doom_individually_hashes.end(); it != end; ++it) {
    const int doom_result = DoomEntryFromHash(*it, barrier_callback);
    DCHECK_EQ(net::ERR_IO_PENDING, doom_result);
    index_->Remove(*it);
  }

  for (std::vector<uint64>::const_iterator it = mass_doom_entry_hashes->begin(),
       end = mass_doom_entry_hashes->end();
       it != end; ++it) {
    index_->Remove(*it);
    OnDoomStart(*it);
  }

  // Taking this pointer here avoids undefined behaviour from calling
  // base::Passed before mass_doom_entry_hashes.get().
  std::vector<uint64>* mass_doom_entry_hashes_ptr =
      mass_doom_entry_hashes.get();
  PostTaskAndReplyWithResult(worker_pool_.get(),
                             FROM_HERE,
                             base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
                                        mass_doom_entry_hashes_ptr,
                                        path_),
                             base::Bind(&SimpleBackendImpl::DoomEntriesComplete,
                                        AsWeakPtr(),
                                        base::Passed(&mass_doom_entry_hashes),
                                        barrier_callback));
}
net::CacheType SimpleBackendImpl::GetCacheType() const {
  // NOTE(review): reports DISK_CACHE unconditionally even though this
  // backend stores |cache_type_| — confirm no caller relies on the real
  // constructed type here.
  return net::DISK_CACHE;
}
// Number of entries currently tracked by the index.
int32 SimpleBackendImpl::GetEntryCount() const {
  // TODO(pasko): Use directory file count when index is not ready.
  return index_->GetEntryCount();
}
// Opens the entry for |key|. If a doom is in flight for the same hash, the
// open is queued and retried once the doom finishes. The open result is
// post-processed by OnEntryOpenedFromKey() to detect hash collisions.
int SimpleBackendImpl::OpenEntry(const std::string& key,
                                 Entry** entry,
                                 const CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  // TODO(gavinp): Factor out this (not quite completely) repetitive code
  // block from OpenEntry/CreateEntry/DoomEntry.
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    // Queue the whole operation; OnDoomComplete() will replay it.
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
                 AsWeakPtr(),
                 key,
                 entry,
                 simple_entry,
                 callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}
// Creates the entry for |key| (which must be non-empty). Like OpenEntry(),
// the operation is queued behind any in-flight doom for the same hash.
int SimpleBackendImpl::CreateEntry(const std::string& key,
                                   Entry** entry,
                                   const CompletionCallback& callback) {
  DCHECK_LT(0u, key.size());
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    // Queue the whole operation; OnDoomComplete() will replay it.
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::CreateEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->CreateEntry(entry, callback);
}
// Dooms the entry for |key|. A doom already in flight for the same hash
// causes this request to be queued and retried after it completes.
int SimpleBackendImpl::DoomEntry(const std::string& key,
                                 const net::CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    // Queue the whole operation; OnDoomComplete() will replay it.
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntry, base::Unretained(this), key);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->DoomEntry(callback);
}
// Dooms every entry by passing default (null) Times as the range bounds to
// DoomEntriesBetween().
int SimpleBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  return DoomEntriesBetween(Time(), Time(), callback);
}
// Continuation of DoomEntriesBetween() once the index has loaded. Forwards
// an index-load failure directly; otherwise collects the hashes in the time
// range and mass-dooms them.
void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
                                          Time end_time,
                                          const CompletionCallback& callback,
                                          int result) {
  if (result != net::OK) {
    callback.Run(result);
    return;
  }
  scoped_ptr<std::vector<uint64> > removed_key_hashes(
      index_->GetEntriesBetween(initial_time, end_time).release());
  DoomEntries(removed_key_hashes.get(), callback);
}
// Dooms the entries whose last use falls in the given time range. The work
// is deferred until the index is loaded, then continues in
// IndexReadyForDoom().
int SimpleBackendImpl::DoomEntriesBetween(
    const Time initial_time,
    const Time end_time,
    const CompletionCallback& callback) {
  return index_->ExecuteWhenReady(
      base::Bind(&SimpleBackendImpl::IndexReadyForDoom, AsWeakPtr(),
                 initial_time, end_time, callback));
}
// Dooms entries used since |initial_time|; the null end Time makes the
// range open-ended.
int SimpleBackendImpl::DoomEntriesSince(
    const Time initial_time,
    const CompletionCallback& callback) {
  return DoomEntriesBetween(initial_time, Time(), callback);
}
// Iterates over a lazily-taken snapshot of the index's hashes, opening each
// entry that still exists. Stale hashes (entries removed since the snapshot)
// surface as ERR_FAILED from the open and are skipped.
class SimpleBackendImpl::SimpleIterator final : public Iterator {
 public:
  explicit SimpleIterator(base::WeakPtr<SimpleBackendImpl> backend)
      : backend_(backend),
        weak_factory_(this) {
  }

  // From Backend::Iterator:
  int OpenNextEntry(Entry** next_entry,
                    const CompletionCallback& callback) override {
    CompletionCallback open_next_entry_impl =
        base::Bind(&SimpleIterator::OpenNextEntryImpl,
                   weak_factory_.GetWeakPtr(), next_entry, callback);
    // Wait for the index: it supplies the hash snapshot we iterate over.
    return backend_->index_->ExecuteWhenReady(open_next_entry_impl);
  }

  // Runs once the index is ready. Pops hashes off the snapshot until one
  // opens (or is pending), or the snapshot is exhausted (ERR_FAILED = end
  // of iteration).
  void OpenNextEntryImpl(Entry** next_entry,
                         const CompletionCallback& callback,
                         int index_initialization_error_code) {
    if (!backend_) {
      // The backend died while we waited for the index.
      callback.Run(net::ERR_FAILED);
      return;
    }
    if (index_initialization_error_code != net::OK) {
      callback.Run(index_initialization_error_code);
      return;
    }
    // Snapshot all hashes on first use.
    if (!hashes_to_enumerate_)
      hashes_to_enumerate_ = backend_->index()->GetAllHashes().Pass();

    while (!hashes_to_enumerate_->empty()) {
      uint64 entry_hash = hashes_to_enumerate_->back();
      hashes_to_enumerate_->pop_back();
      if (backend_->index()->Has(entry_hash)) {
        *next_entry = NULL;
        CompletionCallback continue_iteration = base::Bind(
            &SimpleIterator::CheckIterationReturnValue,
            weak_factory_.GetWeakPtr(),
            next_entry,
            callback);
        int error_code_open = backend_->OpenEntryFromHash(entry_hash,
                                                          next_entry,
                                                          continue_iteration);
        if (error_code_open == net::ERR_IO_PENDING)
          return;  // CheckIterationReturnValue() continues asynchronously.
        if (error_code_open != net::ERR_FAILED) {
          callback.Run(error_code_open);
          return;
        }
        // ERR_FAILED: stale hash; fall through and try the next one.
      }
    }
    callback.Run(net::ERR_FAILED);
  }

  // Async open completion: ERR_FAILED means the entry vanished, so advance
  // to the next hash; anything else is the iteration result.
  void CheckIterationReturnValue(Entry** entry,
                                 const CompletionCallback& callback,
                                 int error_code) {
    if (error_code == net::ERR_FAILED) {
      OpenNextEntry(entry, callback);
      return;
    }
    callback.Run(error_code);
  }

 private:
  base::WeakPtr<SimpleBackendImpl> backend_;
  scoped_ptr<std::vector<uint64> > hashes_to_enumerate_;
  base::WeakPtrFactory<SimpleIterator> weak_factory_;
};
// Returns a new iterator; it holds only a weak pointer to the backend, so
// it remains safe to use (failing gracefully) after the backend dies.
scoped_ptr<Backend::Iterator> SimpleBackendImpl::CreateIterator() {
  return scoped_ptr<Iterator>(new SimpleIterator(AsWeakPtr()));
}
547 void SimpleBackendImpl::GetStats(base::StringPairs* stats) {
548 std::pair<std::string, std::string> item;
549 item.first = "Cache type";
550 item.second = "Simple Cache";
551 stats->push_back(item);
// Refreshes the index's use-time bookkeeping for |key| when an external
// consumer reports a hit, without opening the entry.
void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
  index_->UseIfExists(simple_util::GetEntryHashKey(key));
}
// Completion of InitCacheStructureOnDisk(): on success, configures the index
// with the computed max size and directory mtime and starts its load; either
// way, forwards the net error to the Init() caller.
void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback,
                                        const DiskStatResult& result) {
  if (result.net_error == net::OK) {
    index_->SetMaxSize(result.max_size);
    index_->Initialize(result.cache_dir_mtime);
  }
  callback.Run(result.net_error);
}
// Runs on |cache_thread_| (bound without an object pointer in Init()):
// verifies or creates the on-disk cache structure and resolves the max
// cache size, deriving one from free disk space if |suggested_max_size| is
// zero. The result is consumed by InitializeIndex().
SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
    const base::FilePath& path,
    uint64 suggested_max_size) {
  DiskStatResult result;
  result.max_size = suggested_max_size;
  result.net_error = net::OK;
  if (!FileStructureConsistent(path)) {
    LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
               << path.LossyDisplayName();
    result.net_error = net::ERR_FAILED;
  } else {
    bool mtime_result =
        disk_cache::simple_util::GetMTime(path, &result.cache_dir_mtime);
    DCHECK(mtime_result);
    if (!result.max_size) {
      // No explicit budget: size the cache from available disk space.
      int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
      result.max_size = disk_cache::PreferredCacheSize(available);
    }
    DCHECK(result.max_size);
  }
  return result;
}
// Returns the active entry for |entry_hash|, creating and registering one
// (with an ActiveEntryProxy) if none exists. If the active entry's key does
// not match — a hash collision — the old entry is doomed and the lookup is
// retried; the doom synchronously deregisters it (see the DCHECK).
scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
    const uint64 entry_hash,
    const std::string& key) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(entry_hash, NULL));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    SimpleEntryImpl* entry = it->second =
        new SimpleEntryImpl(cache_type_, path_, entry_hash,
                            entry_operations_mode_, this, net_log_);
    entry->SetKey(key);
    entry->SetActiveEntryProxy(ActiveEntryProxy::Create(entry_hash, this));
  }
  DCHECK(it->second);
  // It's possible, but unlikely, that we have an entry hash collision with a
  // currently active entry.
  if (key != it->second->key()) {
    it->second->Doom();
    DCHECK_EQ(0U, active_entries_.count(entry_hash));
    return CreateOrFindActiveEntry(entry_hash, key);
  }
  return make_scoped_refptr(it->second);
}
// Opens an entry identified only by its hash (the key is unknown; used by
// the iterator). Queues behind any pending doom; reuses an active entry's
// key when one exists; otherwise opens a key-less entry, completing in
// OnEntryOpenedFromHash().
int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash,
                                         Entry** entry,
                                         const CompletionCallback& callback) {
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    // Queue the whole operation; OnDoomComplete() will replay it.
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntryFromHash,
                   base::Unretained(this), entry_hash, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }

  EntryMap::iterator has_active = active_entries_.find(entry_hash);
  if (has_active != active_entries_.end()) {
    // The key is known from the active entry; take the normal open path.
    return OpenEntry(has_active->second->key(), entry, callback);
  }

  scoped_refptr<SimpleEntryImpl> simple_entry = new SimpleEntryImpl(
      cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
                 AsWeakPtr(), entry_hash, entry, simple_entry, callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}
643 int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash,
644 const CompletionCallback& callback) {
645 Entry** entry = new Entry*();
646 scoped_ptr<Entry*> scoped_entry(entry);
648 base::hash_map<uint64, std::vector<Closure> >::iterator pending_it =
649 entries_pending_doom_.find(entry_hash);
650 if (pending_it != entries_pending_doom_.end()) {
651 Callback<int(const net::CompletionCallback&)> operation =
652 base::Bind(&SimpleBackendImpl::DoomEntryFromHash,
653 base::Unretained(this), entry_hash);
654 pending_it->second.push_back(base::Bind(&RunOperationAndCallback,
655 operation, callback));
656 return net::ERR_IO_PENDING;
659 EntryMap::iterator active_it = active_entries_.find(entry_hash);
660 if (active_it != active_entries_.end())
661 return active_it->second->DoomEntry(callback);
663 // There's no pending dooms, nor any open entry. We can make a trivial
664 // call to DoomEntries() to delete this entry.
665 std::vector<uint64> entry_hash_vector;
666 entry_hash_vector.push_back(entry_hash);
667 DoomEntries(&entry_hash_vector, callback);
668 return net::ERR_IO_PENDING;
// Completion of OpenEntryFromHash() for a hash that had no active entry when
// the open began. Registers the opened entry as active — or, if a racing
// operation activated the hash in the meantime, discards this open and
// re-opens through the already-active entry.
void SimpleBackendImpl::OnEntryOpenedFromHash(
    uint64 hash,
    Entry** entry,
    const scoped_refptr<SimpleEntryImpl>& simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code != net::OK) {
    callback.Run(error_code);
    return;
  }
  DCHECK(*entry);
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(EntryMap::value_type(hash, simple_entry.get()));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    // There was no active entry corresponding to this hash. We've already put
    // the entry opened from hash in the |active_entries_|. We now provide the
    // proxy object to the entry.
    it->second->SetActiveEntryProxy(ActiveEntryProxy::Create(hash, this));
    callback.Run(net::OK);
  } else {
    // The entry was made active while we were waiting for the open from hash
    // to finish. The entry created from hash needs to be closed, and the one
    // in |active_entries_| can be returned to the caller.
    simple_entry->Close();
    it->second->OpenEntry(entry, callback);
  }
}
// Completion of OpenEntry(): verifies that the opened entry's on-disk key
// really matches |key| (an on-disk hash collision is possible). On mismatch
// the entry is doomed and the open fails with ERR_FAILED.
// NOTE(review): |key| is passed by const value rather than const reference —
// harmless for a Bind target (it stores its own copy), but unusual.
void SimpleBackendImpl::OnEntryOpenedFromKey(
    const std::string key,
    Entry** entry,
    const scoped_refptr<SimpleEntryImpl>& simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  int final_code = error_code;
  if (final_code == net::OK) {
    bool key_matches = key.compare(simple_entry->key()) == 0;
    if (!key_matches) {
      // TODO(clamy): Add a unit test to check this code path.
      DLOG(WARNING) << "Key mismatch on open.";
      simple_entry->Doom();
      simple_entry->Close();
      final_code = net::ERR_FAILED;
    } else {
      DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
    }
    SIMPLE_CACHE_UMA(BOOLEAN, "KeyMatchedOnOpen", cache_type_, key_matches);
  }
  callback.Run(final_code);
}
724 void SimpleBackendImpl::DoomEntriesComplete(
725 scoped_ptr<std::vector<uint64> > entry_hashes,
726 const net::CompletionCallback& callback,
727 int result) {
728 std::for_each(
729 entry_hashes->begin(), entry_hashes->end(),
730 std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete),
731 this));
732 callback.Run(result);
// static
void SimpleBackendImpl::FlushWorkerPoolForTesting() {
  // We only need to do this if there is an active task runner.
  if (base::ThreadTaskRunnerHandle::IsSet())
    g_sequenced_worker_pool.Get().FlushForTesting();
}

}  // namespace disk_cache