// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_net_log_parameters.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace disk_cache {
namespace {

// An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
// the cache.
const int64 kMaxSparseDataSizeDivisor = 10;
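// For example, with the divisor of 10 above, an entry in a 100 MB cache may
// hold at most about 10 MB of sparse data.
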
// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_FAST_EMPTY_RETURN = 5,
  WRITE_RESULT_MAX = 6,
};

// Used in histograms, please only add entries at the end.
enum HeaderSizeChange {
  HEADER_SIZE_CHANGE_INITIAL,
  HEADER_SIZE_CHANGE_SAME,
  HEADER_SIZE_CHANGE_INCREASE,
  HEADER_SIZE_CHANGE_DECREASE,
  HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
  HEADER_SIZE_CHANGE_MAX
};

void RecordReadResult(net::CacheType cache_type, ReadResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadResult", cache_type, result, READ_RESULT_MAX);
}

void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteResult2", cache_type, result, WRITE_RESULT_MAX);
}

// TODO(ttuttle): Consider removing this once we have a good handle on header
// size changes.
void RecordHeaderSizeChange(net::CacheType cache_type,
                            int old_size, int new_size) {
  HeaderSizeChange size_change;

  SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size);

  if (old_size == 0) {
    size_change = HEADER_SIZE_CHANGE_INITIAL;
  } else if (new_size == old_size) {
    size_change = HEADER_SIZE_CHANGE_SAME;
  } else if (new_size > old_size) {
    int delta = new_size - old_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeIncreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeIncreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_INCREASE;
  } else {  // new_size < old_size
    int delta = old_size - new_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeDecreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeDecreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_DECREASE;
  }

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   size_change, HEADER_SIZE_CHANGE_MAX);
}

void RecordUnexpectedStream0Write(net::CacheType cache_type) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, HEADER_SIZE_CHANGE_MAX);
}

int g_open_entry_count = 0;

void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) {
  g_open_entry_count += offset;
  SIMPLE_CACHE_UMA(COUNTS_10000,
                   "GlobalOpenEntryCount", cache_type, g_open_entry_count);
}

void InvokeCallbackIfBackendIsAlive(
    const base::WeakPtr<SimpleBackendImpl>& backend,
    const net::CompletionCallback& completion_callback,
    int result) {
  DCHECK(!completion_callback.is_null());
  if (!backend.get())
    return;
  completion_callback.Run(result);
}

}  // namespace

using base::Closure;
using base::FilePath;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
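// Typical usage (a sketch): construct one on the stack at the top of any
// method that may enqueue operations, so that the queue is drained on every
// return path.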
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};

SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() {}

SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
                                 const FilePath& path,
                                 const uint64 entry_hash,
                                 OperationsMode operations_mode,
                                 SimpleBackendImpl* backend,
                                 net::NetLog* net_log)
    : backend_(backend->AsWeakPtr()),
      cache_type_(cache_type),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      sparse_data_size_(0),
      open_count_(0),
      doomed_(false),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL),
      net_log_(net::BoundNetLog::Make(
          net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)),
      stream_0_data_(new net::GrowableIOBuffer()) {
  static_assert(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                "arrays should be the same size");
  static_assert(arraysize(data_size_) == arraysize(crc32s_),
                "arrays should be the same size");
  static_assert(arraysize(data_size_) == arraysize(have_written_),
                "arrays should be the same size");
  static_assert(arraysize(data_size_) == arraysize(crc_check_state_),
                "arrays should be the same size");
  MakeUninitialized();
  net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
                      CreateNetLogSimpleEntryConstructionCallback(this));
}

void SimpleEntryImpl::SetActiveEntryProxy(
    scoped_ptr<ActiveEntryProxy> active_entry_proxy) {
  DCHECK(!active_entry_proxy_);
  active_entry_proxy_.reset(active_entry_proxy.release());
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);

  bool have_index = backend_->index()->initialized();
  // This enumeration is used in histograms, add entries only at the end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (have_index) {
    if (backend_->index()->Has(entry_hash_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "OpenEntryIndexState", cache_type_,
                   open_entry_index_enum, INDEX_MAX);

  // If the entry is not known to the index, initiate fast failover to the
  // network.
  if (open_entry_index_enum == INDEX_MISS) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        net::ERR_FAILED);
    return net::ERR_FAILED;
  }

  pending_operations_.push(SimpleEntryOperation::OpenOperation(
      this, have_index, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);

  bool have_index = backend_->index()->initialized();
  int ret_value = net::ERR_FAILED;
  if (use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);
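    // The optimistic path hands the entry back to the caller right away; the
    // actual file creation is merely queued, and a failure there surfaces
    // through later operations on this entry rather than through this call.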
    ReturnEntryToCaller(out_entry);
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, callback, out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry into the index before creating the entry files in the
  // SimpleSynchronousEntry, because the worst case is then an entry that is
  // present in the index without its files having been created yet; that way
  // we never leak files. CreationOperationComplete will remove the entry from
  // the index if the creation fails.
  backend_->index()->Insert(entry_hash_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  if (doomed_)
    return net::OK;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);

  MarkAsDoomed();
  if (backend_.get())
    backend_->OnDoomStart(entry_hash_);
  pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::SetKey(const std::string& key) {
  key_ = key;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
                    net::NetLog::StringCallback("key", &key));
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsCapturing()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
                      CreateNetLogReadWriteDataCallback(stream_index, offset,
                                                        buf_len, false));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      buf_len < 0) {
    if (net_log_.IsCapturing()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }

    RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
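
  // With no operations pending, a read past the current end of stream (or one
  // of zero length) can complete synchronously with 0. While operations are
  // queued the stream size may still change, so such reads must be queued too.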
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
                                      offset < 0 || !buf_len)) {
    if (net_log_.IsCapturing()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
                        CreateNetLogReadWriteCompleteCallback(0));
    }

    RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(clamy): return immediately when reading from stream 0.

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  bool alone_in_queue =
      pending_operations_.size() == 0 && state_ == STATE_READY;
  pending_operations_.push(SimpleEntryOperation::ReadOperation(
      this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsCapturing()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      offset < 0 || buf_len < 0) {
    if (net_log_.IsCapturing()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    if (net_log_.IsCapturing()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }
  ScopedOperationRunner operation_runner(this);

  // Stream 0 data is kept in memory, so it can be written immediately if
  // there are no I/O operations pending.
  if (stream_index == 0 && state_ == STATE_READY &&
      pending_operations_.size() == 0)
    return SetStream0Data(buf, offset, buf_len, truncate);

  // We can only do an optimistic write if there are no pending operations, so
  // that we are sure the next call to RunNextOperationIfNeeded will actually
  // run the write operation that sets the stream size. This also protects
  // against previous possibly-conflicting writes that could be stacked in
  // |pending_operations_|. We could optimize this for the case where only
  // read operations are enqueued.
  const bool optimistic =
      (use_optimistic_operations_ && state_ == STATE_READY &&
       pending_operations_.size() == 0);
  CompletionCallback op_callback;
  scoped_refptr<net::IOBuffer> op_buf;
  int ret_value = net::ERR_FAILED;
  if (!optimistic) {
    op_buf = buf;
    op_callback = callback;
    ret_value = net::ERR_IO_PENDING;
  } else {
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
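    // Because the write is reported as complete before it actually happens,
    // the caller is free to reuse |buf| as soon as this call returns; the
    // data is therefore copied into a buffer owned by this entry.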
    if (buf) {
      op_buf = new IOBuffer(buf_len);
      memcpy(op_buf->data(), buf->data(), buf_len);
    }
    op_callback = CompletionCallback();
    ret_value = buf_len;
    if (net_log_.IsCapturing()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
          CreateNetLogReadWriteCompleteCallback(buf_len));
    }
  }

  pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
                                                                stream_index,
                                                                offset,
                                                                buf_len,
                                                                op_buf.get(),
                                                                truncate,
                                                                optimistic,
                                                                op_callback));
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
      this, offset, len, start, callback));
  return net::ERR_IO_PENDING;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(ttuttle): Actually check.
  return true;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
  return net::OK;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
}

void SimpleEntryImpl::PostClientCallback(const CompletionCallback& callback,
                                         int result) {
  if (callback.is_null())
    return;
  // Note that the callback is posted rather than directly invoked to avoid
  // reentrancy issues.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&InvokeCallbackIfBackendIsAlive, backend_, callback, result));
}

void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
    crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
  }
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close()
  if (!backend_.get()) {
    // This method can be called when an asynchronous operation completed.
    // If the backend no longer exists, the callback won't be invoked, and so
    // we must close ourselves to avoid leaking. As well, there's no guarantee
    // the client-provided pointer (|out_entry|) hasn't been freed, and no
    // point dereferencing it, either.
    Close();
    return;
  }
  *out_entry = this;
}

void SimpleEntryImpl::MarkAsDoomed() {
  doomed_ = true;
  if (!backend_.get())
    return;
  backend_->index()->Remove(entry_hash_);
  active_entry_proxy_.reset();
}

void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "EntryOperationsPending", cache_type_,
                   pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    scoped_ptr<SimpleEntryOperation> operation(
        new SimpleEntryOperation(pending_operations_.front()));
    pending_operations_.pop();
    switch (operation->type()) {
      case SimpleEntryOperation::TYPE_OPEN:
        OpenEntryInternal(operation->have_index(),
                          operation->callback(),
                          operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CREATE:
        CreateEntryInternal(operation->have_index(),
                            operation->callback(),
                            operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CLOSE:
        CloseInternal();
        break;
      case SimpleEntryOperation::TYPE_READ:
        RecordReadIsParallelizable(*operation);
        ReadDataInternal(operation->index(),
                         operation->offset(),
                         operation->buf(),
                         operation->length(),
                         operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
        RecordWriteDependencyType(*operation);
        WriteDataInternal(operation->index(),
                          operation->offset(),
                          operation->buf(),
                          operation->length(),
                          operation->callback(),
                          operation->truncate());
        break;
      case SimpleEntryOperation::TYPE_READ_SPARSE:
        ReadSparseDataInternal(operation->sparse_offset(),
                               operation->buf(),
                               operation->length(),
                               operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE_SPARSE:
        WriteSparseDataInternal(operation->sparse_offset(),
                                operation->buf(),
                                operation->length(),
                                operation->callback());
        break;
      case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
        GetAvailableRangeInternal(operation->sparse_offset(),
                                  operation->length(),
                                  operation->out_start(),
                                  operation->callback());
        break;
      case SimpleEntryOperation::TYPE_DOOM:
        DoomEntryInternal(operation->callback());
        break;
      default:
        NOTREACHED();
    }
    // The operation is kept for histograms. Make sure it does not leak
    // resources.
    executing_operation_.swap(operation);
    executing_operation_->ReleaseReferences();
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::OpenEntryInternal(bool have_index,
                                        const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);

  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    PostClientCallback(callback, net::OK);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::OK));
    return;
  }
  if (state_ == STATE_FAILURE) {
    PostClientCallback(callback, net::ERR_FAILED);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
                            cache_type_,
                            path_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreateEntryInternal(bool have_index,
                                          const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);

  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    PostClientCallback(callback, net::ERR_FAILED);
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryStreamCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
                            cache_type_,
                            path_,
                            key_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
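    // For each stream written during this open, queue an EOF record: the
    // running crc32 when it covers the whole stream, otherwise a record
    // flagging the checksum as unknown.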
    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task =
        base::Bind(&SimpleSynchronousEntry::Close,
                   base::Unretained(synchronous_entry_),
                   SimpleEntryStat(last_used_, last_modified_, data_size_,
                                   sparse_data_size_),
                   base::Passed(&crc32s_to_write),
                   stream_0_data_);
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (!have_written_[i]) {
        SIMPLE_CACHE_UMA(ENUMERATION,
                         "CheckCRCResult", cache_type_,
                         crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsCapturing()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(cache_type_, READ_RESULT_BAD_STATE);
      // Note that the API states that client-provided callbacks for
      // entry-level (i.e. non-backend) operations (e.g. read, write) are
      // invoked even if the backend was already destroyed.
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    if (net_log_.IsCapturing()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                    base::Bind(callback, 0));
    return;
  }

  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  // Since stream 0 data is kept in memory, it is read immediately.
  if (stream_index == 0) {
    int ret_value = ReadStream0Data(buf, offset, buf_len);
    if (!callback.is_null()) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, ret_value));
    }
    return;
  }

  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  Closure task = base::Bind(
      &SimpleSynchronousEntry::ReadData,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
      make_scoped_refptr(buf),
      read_crc32.get(),
      entry_stat.get(),
      result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
                             this,
                             stream_index,
                             offset,
                             callback,
                             base::Passed(&read_crc32),
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsCapturing()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE);
    if (net_log_.IsCapturing()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    if (!callback.is_null()) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);

  // Since stream 0 data is kept in memory, it can be written immediately.
  if (stream_index == 0) {
    int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
    if (!callback.is_null()) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, ret_value));
    }
    return;
  }

  // Ignore zero-length writes that do not change the file size.
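  // (A truncating zero-length write is a no-op only when it lands exactly at
  // the current end of the stream; a non-truncating one is a no-op anywhere
  // at or below the current size.)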
  if (buf_len == 0) {
    int32 data_size = data_size_[stream_index];
    if (truncate ? (offset == data_size) : (offset <= data_size)) {
      RecordWriteResult(cache_type_, WRITE_RESULT_FAST_EMPTY_RETURN);
      if (!callback.is_null()) {
        base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                      base::Bind(callback, 0));
      }
      return;
    }
  }

  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  AdvanceCrc(buf, offset, buf_len, stream_index);

  // |entry_stat| needs to be initialized before modifying |data_size_|.
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;
  // Writing on stream 1 affects the placement of stream 0 in the file, so the
  // EOF record will have to be rewritten.
  if (stream_index == 1)
    have_written_[0] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                stream_index, offset, buf_len, truncate,
                                doomed_),
                            make_scoped_refptr(buf),
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
                             this,
                             stream_index,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::ReadSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  scoped_ptr<base::Time> last_used(new base::Time());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            last_used.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&last_used),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  uint64 max_sparse_data_size = kint64max;
  if (backend_.get()) {
    uint64 max_cache_size = backend_->index()->max_size();
    max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
  }

  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));

  last_used_ = last_modified_ = base::Time::Now();

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            max_sparse_data_size,
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::GetAvailableRangeInternal(
    int64 sparse_offset,
    int len,
    int64* out_start,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, len),
                            out_start,
                            result.get());
  Closure reply = base::Bind(
      &SimpleEntryImpl::GetAvailableRangeOperationComplete,
      this,
      callback,
      base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
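  // The pre-doom state is bound into the reply so DoomOperationComplete() can
  // restore it; meanwhile the entry is parked in STATE_IO_PENDING so that no
  // other operation starts while the doom runs on the worker pool.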
  PostTaskAndReplyWithResult(
      worker_pool_.get(),
      FROM_HERE,
      base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, entry_hash_),
      base::Bind(
          &SimpleEntryImpl::DoomOperationComplete, this, callback, state_));
  state_ = STATE_IO_PENDING;
}

void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleEntryCreationResults> in_results,
    Entry** out_entry,
    net::NetLog::EventType end_event_type) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_results);
  ScopedOperationRunner operation_runner(this);
  SIMPLE_CACHE_UMA(BOOLEAN,
                   "EntryCreationResult", cache_type_,
                   in_results->result == net::OK);
  if (in_results->result != net::OK) {
    if (in_results->result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();

    net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
    PostClientCallback(completion_callback, net::ERR_FAILED);
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = in_results->sync_entry;
  if (in_results->stream_0_data.get()) {
    stream_0_data_ = in_results->stream_0_data;
    // The crc was read in SimpleSynchronousEntry.
    crc_check_state_[0] = CRC_CHECK_DONE;
    crc32s_[0] = in_results->stream_0_crc32;
    crc32s_end_offset_[0] = in_results->entry_stat.data_size(0);
  }
  if (key_.empty()) {
    SetKey(synchronous_entry_->key());
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  UpdateDataFromEntryStat(in_results->entry_stat);
  SIMPLE_CACHE_UMA(TIMES,
                   "EntryCreationTime", cache_type_,
                   (base::TimeTicks::Now() - start_time));
  AdjustOpenEntryCountBy(cache_type_, 1);

  net_log_.AddEvent(end_event_type);
  PostClientCallback(completion_callback, net::OK);
}

void SimpleEntryImpl::EntryOperationComplete(
    const CompletionCallback& completion_callback,
    const SimpleEntryStat& entry_stat,
    scoped_ptr<int> result) {
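  // Shared tail for the I/O completions: a negative result dooms the entry
  // and parks it in STATE_FAILURE; otherwise the entry returns to STATE_READY
  // with its metadata refreshed from |entry_stat|.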
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  if (*result < 0) {
    state_ = STATE_FAILURE;
    MarkAsDoomed();
  } else {
    state_ = STATE_READY;
    UpdateDataFromEntryStat(entry_stat);
  }

  if (!completion_callback.is_null()) {
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::Bind(completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 &&
      crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
    crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
  }
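
  // If this read extends the contiguous checksummed prefix of the stream,
  // fold the crc32 of the newly read bytes into the running value. zlib's
  // crc32_combine() derives the crc of a concatenation from the two pieces'
  // crcs and the length of the second piece.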
  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a crc of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index,
                                *entry_stat,
                                crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }

  if (*result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  if (*result < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  if (net_log_.IsCapturing()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  if (*result >= 0)
    RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE);
  if (net_log_.IsCapturing()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
                      CreateNetLogReadWriteCompleteCallback(*result));
  }

  if (*result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::ReadSparseOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<base::Time> last_used,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::WriteSparseOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::GetAvailableRangeOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::DoomOperationComplete(
    const CompletionCallback& callback,
    State state_to_restore,
    int result) {
  state_ = state_to_restore;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_END);
  if (!callback.is_null())
    callback.Run(result);
  RunNextOperationIfNeeded();
  if (backend_)
    backend_->OnDoomComplete(entry_hash_);
}

void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (net_log_.IsCapturing()) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
        *result);
  }

  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    else
      RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE);

    if (net_log_.IsCapturing()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
                        CreateNetLogReadWriteCompleteCallback(*result));
    }
  }
  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
  AdjustOpenEntryCountBy(cache_type_, -1);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::UpdateDataFromEntryStat(
    const SimpleEntryStat& entry_stat) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);

  last_used_ = entry_stat.last_used();
  last_modified_ = entry_stat.last_modified();
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    data_size_[i] = entry_stat.data_size(i);
  }
  sparse_data_size_ = entry_stat.sparse_data_size();
  if (!doomed_ && backend_.get())
    backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
}

int64 SimpleEntryImpl::GetDiskUsage() const {
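  // A rough estimate of the on-disk footprint: per-stream file sizes derived
  // from the key and data sizes, plus the sparse data.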
  int64 file_size = 0;
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    file_size +=
        simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
  }
  file_size += sparse_data_size_;
  return file_size;
}

void SimpleEntryImpl::RecordReadIsParallelizable(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum ReadDependencyType {
    // READ_STANDALONE = 0, Deprecated.
    READ_FOLLOWS_READ = 1,
    READ_FOLLOWS_CONFLICTING_WRITE = 2,
    READ_FOLLOWS_NON_CONFLICTING_WRITE = 3,
    READ_FOLLOWS_OTHER = 4,
    READ_ALONE_IN_QUEUE = 5,
    READ_DEPENDENCY_TYPE_MAX = 6,
  };

  ReadDependencyType type = READ_FOLLOWS_OTHER;
  if (operation.alone_in_queue()) {
    type = READ_ALONE_IN_QUEUE;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
    type = READ_FOLLOWS_READ;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    if (executing_operation_->ConflictsWith(operation))
      type = READ_FOLLOWS_CONFLICTING_WRITE;
    else
      type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadIsParallelizable", cache_type_,
                   type, READ_DEPENDENCY_TYPE_MAX);
}

void SimpleEntryImpl::RecordWriteDependencyType(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum WriteDependencyType {
    WRITE_OPTIMISTIC = 0,
    WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
    WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
    WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
    WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
    WRITE_FOLLOWS_CONFLICTING_READ = 5,
    WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
    WRITE_FOLLOWS_OTHER = 7,
    WRITE_DEPENDENCY_TYPE_MAX = 8,
  };

  WriteDependencyType type = WRITE_FOLLOWS_OTHER;
  if (operation.optimistic()) {
    type = WRITE_OPTIMISTIC;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
             executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    bool conflicting = executing_operation_->ConflictsWith(operation);

    if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
                         : WRITE_FOLLOWS_NON_CONFLICTING_READ;
    } else if (executing_operation_->optimistic()) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
                         : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
    } else {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
                         : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
    }
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteDependencyType", cache_type_,
                   type, WRITE_DEPENDENCY_TYPE_MAX);
}

int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
                                     int offset,
                                     int buf_len) {
  if (buf_len < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
    return 0;
  }
  memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
  UpdateDataFromEntryStat(
      SimpleEntryStat(base::Time::Now(), last_modified_, data_size_,
                      sparse_data_size_));
  RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
  return buf_len;
}

int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
                                    int offset,
                                    int buf_len,
                                    bool truncate) {
  // Currently, stream 0 is only used for HTTP headers, and always writes them
  // with a single, truncating write. Detect these writes and record the size
  // changes of the headers. Also, support writes to stream 0 that have
  // different access patterns, as required by the API contract.
  // All other clients of the Simple Cache are encouraged to use stream 1.
  have_written_[0] = true;
  int data_size = GetDataSize(0);
  if (offset == 0 && truncate) {
    RecordHeaderSizeChange(cache_type_, data_size, buf_len);
    stream_0_data_->SetCapacity(buf_len);
    memcpy(stream_0_data_->data(), buf->data(), buf_len);
    data_size_[0] = buf_len;
  } else {
    RecordUnexpectedStream0Write(cache_type_);
    const int buffer_size =
        truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
    stream_0_data_->SetCapacity(buffer_size);
    // If |stream_0_data_| was extended, the extension up to |offset| needs to
    // be zero-filled.
    const int fill_size = offset <= data_size ? 0 : offset - data_size;
    if (fill_size > 0)
      memset(stream_0_data_->data() + data_size, 0, fill_size);
    if (buf)
      memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
    data_size_[0] = buffer_size;
  }
  base::Time modification_time = base::Time::Now();
  AdvanceCrc(buf, offset, buf_len, 0);
  UpdateDataFromEntryStat(
      SimpleEntryStat(modification_time, modification_time, data_size_,
                      sparse_data_size_));
  RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  return buf_len;
}

void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer,
                                 int offset,
                                 int length,
                                 int stream_index) {
  // It is easy to incrementally compute the CRC for [0 .. |offset + length|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the crc of the data. When we write to an entry and close without
  // having done a sequential write, we don't check the CRC on read.
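  // Example: sequential writes covering [0, 100) and then [100, 150) keep the
  // running crc valid through offset 150; a later rewrite at offset 50 resets
  // |crc32s_end_offset_| to zero, and the checksum will not be verified on
  // read.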
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc =
        (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
    if (length > 0) {
      crc32s_[stream_index] = crc32(
          initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length);
    }
    crc32s_end_offset_[stream_index] = offset + length;
  } else if (offset < crc32s_end_offset_[stream_index]) {
    // If a range for which the crc32 was already computed is rewritten, the
    // computation of the crc32 needs to start from 0 again.
    crc32s_end_offset_[stream_index] = 0;
  }
}

}  // namespace disk_cache