// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_net_log_parameters.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace disk_cache {
namespace {

// An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
// the cache.
const int64 kMaxSparseDataSizeDivisor = 10;

// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_FAST_EMPTY_RETURN = 5,
  WRITE_RESULT_MAX = 6,
};

// Used in histograms, please only add entries at the end.
enum HeaderSizeChange {
  HEADER_SIZE_CHANGE_INITIAL,
  HEADER_SIZE_CHANGE_SAME,
  HEADER_SIZE_CHANGE_INCREASE,
  HEADER_SIZE_CHANGE_DECREASE,
  HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
  HEADER_SIZE_CHANGE_MAX
};

void RecordReadResult(net::CacheType cache_type, ReadResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadResult", cache_type, result, READ_RESULT_MAX);
}

void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteResult2", cache_type, result, WRITE_RESULT_MAX);
}

// TODO(ttuttle): Consider removing this once we have a good handle on header
// size changes.
void RecordHeaderSizeChange(net::CacheType cache_type,
                            int old_size, int new_size) {
  HeaderSizeChange size_change;

  SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size);

  if (old_size == 0) {
    size_change = HEADER_SIZE_CHANGE_INITIAL;
  } else if (new_size == old_size) {
    size_change = HEADER_SIZE_CHANGE_SAME;
  } else if (new_size > old_size) {
    int delta = new_size - old_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeIncreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeIncreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_INCREASE;
  } else {  // new_size < old_size
    int delta = old_size - new_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeDecreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeDecreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_DECREASE;
  }

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   size_change, HEADER_SIZE_CHANGE_MAX);
}

void RecordUnexpectedStream0Write(net::CacheType cache_type) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, HEADER_SIZE_CHANGE_MAX);
}

int g_open_entry_count = 0;

void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) {
  g_open_entry_count += offset;
  SIMPLE_CACHE_UMA(COUNTS_10000,
                   "GlobalOpenEntryCount", cache_type, g_open_entry_count);
}

void InvokeCallbackIfBackendIsAlive(
    const base::WeakPtr<SimpleBackendImpl>& backend,
    const net::CompletionCallback& completion_callback,
    int result) {
  DCHECK(!completion_callback.is_null());
  if (!backend.get())
    return;
  completion_callback.Run(result);
}

}  // namespace

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};

SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
                                 const FilePath& path,
                                 const uint64 entry_hash,
                                 OperationsMode operations_mode,
                                 SimpleBackendImpl* backend,
                                 net::NetLog* net_log)
    : backend_(backend->AsWeakPtr()),
      cache_type_(cache_type),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      sparse_data_size_(0),
      open_count_(0),
      doomed_(false),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL),
      net_log_(net::BoundNetLog::Make(
          net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)),
      stream_0_data_(new net::GrowableIOBuffer()) {
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
                 arrays_should_be_same_size);
  MakeUninitialized();
  net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
                      CreateNetLogSimpleEntryConstructionCallback(this));
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);

  bool have_index = backend_->index()->initialized();
  // This enumeration is used in histograms, add entries only at end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (have_index) {
    if (backend_->index()->Has(entry_hash_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "OpenEntryIndexState", cache_type_,
                   open_entry_index_enum, INDEX_MAX);

  // If entry is not known to the index, initiate fast failover to the network.
  if (open_entry_index_enum == INDEX_MISS) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        net::ERR_FAILED);
    return net::ERR_FAILED;
  }

  pending_operations_.push(SimpleEntryOperation::OpenOperation(
      this, have_index, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}
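
// A note on the optimistic create path below: when the backend runs in
// OPTIMISTIC_OPERATIONS mode and nothing is queued, CreateEntry() hands the
// entry back to the caller and returns net::OK before any file I/O has
// happened. The actual file creation still runs on the worker pool, and
// CreationOperationComplete() reconciles the result (including removing the
// entry from the index on failure).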

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);

  bool have_index = backend_->index()->initialized();
  int ret_value = net::ERR_FAILED;
  if (use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);

    ReturnEntryToCaller(out_entry);
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, callback, out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry, because then the worst-case scenario is an entry
  // in the index without backing files, which never leaks files.
  // CreationOperationComplete will remove the entry from the index if the
  // creation fails.
  backend_->index()->Insert(entry_hash_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  if (doomed_)
    return net::OK;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);

  MarkAsDoomed();
  if (backend_.get())
    backend_->OnDoomStart(entry_hash_);
  pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::SetKey(const std::string& key) {
  key_ = key;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
                    net::NetLog::StringCallback("key", &key));
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
                      CreateNetLogReadWriteDataCallback(stream_index, offset,
                                                        buf_len, false));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      buf_len < 0) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
                        CreateNetLogReadWriteCompleteCallback(
                            net::ERR_INVALID_ARGUMENT));
    }

    RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
                                      offset < 0 || !buf_len)) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
                        CreateNetLogReadWriteCompleteCallback(0));
    }

    RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(clamy): return immediately when reading from stream 0.

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  bool alone_in_queue =
      pending_operations_.size() == 0 && state_ == STATE_READY;
  pending_operations_.push(SimpleEntryOperation::ReadOperation(
      this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}
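
// WriteData() below has two fast paths: stream 0 writes go straight to the
// in-memory buffer via SetStream0Data(), and, in optimistic mode, other
// writes can return the number of bytes "written" immediately while the real
// write is still queued. In the optimistic case the caller's buffer is copied
// first, since the caller is free to reuse it as soon as we return.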

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      offset < 0 || buf_len < 0) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }

  ScopedOperationRunner operation_runner(this);

  // Stream 0 data is kept in memory, so it can be written immediately if
  // there are no pending I/O operations.
  if (stream_index == 0 && state_ == STATE_READY &&
      pending_operations_.size() == 0)
    return SetStream0Data(buf, offset, buf_len, truncate);

  // We can only do an optimistic write if there are no pending operations, so
  // that we are sure the next call to RunNextOperationIfNeeded will actually
  // run the write operation that sets the stream size. It also protects us
  // from previous, possibly-conflicting writes that could be stacked in
  // |pending_operations_|. We could optimize this for the case where only
  // read operations are enqueued.
  const bool optimistic =
      (use_optimistic_operations_ && state_ == STATE_READY &&
       pending_operations_.size() == 0);
  CompletionCallback op_callback;
  scoped_refptr<net::IOBuffer> op_buf;
  int ret_value = net::ERR_FAILED;
  if (!optimistic) {
    op_buf = buf;
    op_callback = callback;
    ret_value = net::ERR_IO_PENDING;
  } else {
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
    if (buf) {
      op_buf = new IOBuffer(buf_len);
      memcpy(op_buf->data(), buf->data(), buf_len);
    }
    op_callback = CompletionCallback();
    ret_value = buf_len;
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
          CreateNetLogReadWriteCompleteCallback(buf_len));
    }
  }

  pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
                                                                stream_index,
                                                                offset,
                                                                buf_len,
                                                                op_buf.get(),
                                                                truncate,
                                                                optimistic,
                                                                op_callback));
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
      this, offset, len, start, callback));
  return net::ERR_IO_PENDING;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(ttuttle): Actually check.
  return true;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
  return net::OK;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
  net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
}

void SimpleEntryImpl::PostClientCallback(const CompletionCallback& callback,
                                         int result) {
  if (callback.is_null())
    return;
  // Note that the callback is posted rather than directly invoked to avoid
  // reentrancy issues.
  MessageLoopProxy::current()->PostTask(
      FROM_HERE,
      base::Bind(&InvokeCallbackIfBackendIsAlive, backend_, callback, result));
}

void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
    crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
  }
}
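
// Each Entry pointer handed out by ReturnEntryToCaller() below holds both a
// reference (AddRef) and an open count; Close() releases them again, so the
// SimpleEntryImpl stays alive exactly as long as some caller can still reach
// it or an operation is in flight.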

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close().
  if (!backend_.get()) {
    // This method can be called when an asynchronous operation completed.
    // If the backend no longer exists, the callback won't be invoked, and so
    // we must close ourselves to avoid leaking. As well, there's no guarantee
    // the client-provided pointer (|out_entry|) hasn't been freed, and no
    // point dereferencing it, either.
    Close();
    return;
  }
  *out_entry = this;
}

void SimpleEntryImpl::RemoveSelfFromBackend() {
  if (!backend_.get())
    return;
  backend_->OnDeactivated(this);
}

void SimpleEntryImpl::MarkAsDoomed() {
  doomed_ = true;
  if (!backend_.get())
    return;
  backend_->index()->Remove(entry_hash_);
  RemoveSelfFromBackend();
}
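
// All entry operations funnel through the queue drained below: one operation
// runs at a time, and anything that arrives while state_ is STATE_IO_PENDING
// simply waits its turn in |pending_operations_|.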

void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "EntryOperationsPending", cache_type_,
                   pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    scoped_ptr<SimpleEntryOperation> operation(
        new SimpleEntryOperation(pending_operations_.front()));
    pending_operations_.pop();
    switch (operation->type()) {
      case SimpleEntryOperation::TYPE_OPEN:
        OpenEntryInternal(operation->have_index(),
                          operation->callback(),
                          operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CREATE:
        CreateEntryInternal(operation->have_index(),
                            operation->callback(),
                            operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CLOSE:
        CloseInternal();
        break;
      case SimpleEntryOperation::TYPE_READ:
        RecordReadIsParallelizable(*operation);
        ReadDataInternal(operation->index(),
                         operation->offset(),
                         operation->buf(),
                         operation->length(),
                         operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
        RecordWriteDependencyType(*operation);
        WriteDataInternal(operation->index(),
                          operation->offset(),
                          operation->buf(),
                          operation->length(),
                          operation->callback(),
                          operation->truncate());
        break;
      case SimpleEntryOperation::TYPE_READ_SPARSE:
        ReadSparseDataInternal(operation->sparse_offset(),
                               operation->buf(),
                               operation->length(),
                               operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE_SPARSE:
        WriteSparseDataInternal(operation->sparse_offset(),
                                operation->buf(),
                                operation->length(),
                                operation->callback());
        break;
      case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
        GetAvailableRangeInternal(operation->sparse_offset(),
                                  operation->length(),
                                  operation->out_start(),
                                  operation->callback());
        break;
      case SimpleEntryOperation::TYPE_DOOM:
        DoomEntryInternal(operation->callback());
        break;
      default:
        NOTREACHED();
    }
    // The operation is kept for histograms. Make sure it does not leak
    // resources.
    executing_operation_.swap(operation);
    executing_operation_->ReleaseReferences();
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::OpenEntryInternal(bool have_index,
                                        const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);

  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    PostClientCallback(callback, net::OK);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::OK));
    return;
  }
  if (state_ == STATE_FAILURE) {
    PostClientCallback(callback, net::ERR_FAILED);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
                            cache_type_,
                            path_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreateEntryInternal(bool have_index,
                                          const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);

  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    PostClientCallback(callback, net::ERR_FAILED);
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryStreamCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
                            cache_type_,
                            path_,
                            key_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}
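
// On close, a CRC record is emitted for every stream that was written: a
// crc32 marked valid when the stream was written sequentially to its end, and
// a "has no crc" marker otherwise, so that future reads know whether the EOF
// checksum can be trusted.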

void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task =
        base::Bind(&SimpleSynchronousEntry::Close,
                   base::Unretained(synchronous_entry_),
                   SimpleEntryStat(last_used_, last_modified_, data_size_,
                                   sparse_data_size_),
                   base::Passed(&crc32s_to_write),
                   stream_0_data_);
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (!have_written_[i]) {
        SIMPLE_CACHE_UMA(ENUMERATION,
                         "CheckCRCResult", cache_type_,
                         crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(cache_type_, READ_RESULT_BAD_STATE);
      // Note that the API states that client-provided callbacks for entry-level
      // (i.e. non-backend) operations (e.g. read, write) are invoked even if
      // the backend was already destroyed.
      MessageLoopProxy::current()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0));
    return;
  }

  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  // Since stream 0 data is kept in memory, it is read immediately.
  if (stream_index == 0) {
    int ret_value = ReadStream0Data(buf, offset, buf_len);
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE,
                                            base::Bind(callback, ret_value));
    }
    return;
  }

  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  Closure task = base::Bind(
      &SimpleSynchronousEntry::ReadData,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
      make_scoped_refptr(buf),
      read_crc32.get(),
      entry_stat.get(),
      result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
                             this,
                             stream_index,
                             offset,
                             callback,
                             base::Passed(&read_crc32),
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE);
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);

  // Since stream 0 data is kept in memory, it will be written immediately.
  if (stream_index == 0) {
    int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE,
                                            base::Bind(callback, ret_value));
    }
    return;
  }

  // Ignore zero-length writes that do not change the file size.
  if (buf_len == 0) {
    int32 data_size = data_size_[stream_index];
    if (truncate ? (offset == data_size) : (offset <= data_size)) {
      RecordWriteResult(cache_type_, WRITE_RESULT_FAST_EMPTY_RETURN);
      if (!callback.is_null()) {
        MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
            callback, 0));
      }
      return;
    }
  }
  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  AdvanceCrc(buf, offset, buf_len, stream_index);

  // |entry_stat| needs to be initialized before modifying |data_size_|.
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;
  // Writing on stream 1 affects the placement of stream 0 in the file, so the
  // EOF record will have to be rewritten.
  if (stream_index == 1)
    have_written_[0] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                stream_index, offset, buf_len, truncate,
                                doomed_),
                            make_scoped_refptr(buf),
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
                             this,
                             stream_index,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::ReadSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  scoped_ptr<base::Time> last_used(new base::Time());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            last_used.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&last_used),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  int64 max_sparse_data_size = kint64max;
  if (backend_.get()) {
    int64 max_cache_size = backend_->index()->max_size();
    max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
  }

  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));

  last_used_ = last_modified_ = base::Time::Now();

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            max_sparse_data_size,
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::GetAvailableRangeInternal(
    int64 sparse_offset,
    int len,
    int64* out_start,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, len),
                            out_start,
                            result.get());
  Closure reply = base::Bind(
      &SimpleEntryImpl::GetAvailableRangeOperationComplete,
      this,
      callback,
      base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
  PostTaskAndReplyWithResult(
      worker_pool_, FROM_HERE,
      base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, entry_hash_),
      base::Bind(&SimpleEntryImpl::DoomOperationComplete, this, callback,
                 state_));
  state_ = STATE_IO_PENDING;
}

void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleEntryCreationResults> in_results,
    Entry** out_entry,
    net::NetLog::EventType end_event_type) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_results);
  ScopedOperationRunner operation_runner(this);
  SIMPLE_CACHE_UMA(BOOLEAN,
                   "EntryCreationResult", cache_type_,
                   in_results->result == net::OK);
  if (in_results->result != net::OK) {
    if (in_results->result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();

    net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
    PostClientCallback(completion_callback, net::ERR_FAILED);
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = in_results->sync_entry;
  if (in_results->stream_0_data) {
    stream_0_data_ = in_results->stream_0_data;
    // The crc was read in SimpleSynchronousEntry.
    crc_check_state_[0] = CRC_CHECK_DONE;
    crc32s_[0] = in_results->stream_0_crc32;
    crc32s_end_offset_[0] = in_results->entry_stat.data_size(0);
  }
  if (key_.empty()) {
    SetKey(synchronous_entry_->key());
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  UpdateDataFromEntryStat(in_results->entry_stat);
  SIMPLE_CACHE_UMA(TIMES,
                   "EntryCreationTime", cache_type_,
                   (base::TimeTicks::Now() - start_time));
  AdjustOpenEntryCountBy(cache_type_, 1);

  net_log_.AddEvent(end_event_type);
  PostClientCallback(completion_callback, net::OK);
}
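
// EntryOperationComplete() below is the common tail for all I/O completions:
// a negative result moves the entry to STATE_FAILURE and dooms it, while a
// success restores STATE_READY and refreshes the cached metadata before the
// next queued operation runs.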

void SimpleEntryImpl::EntryOperationComplete(
    const CompletionCallback& completion_callback,
    const SimpleEntryStat& entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  if (*result < 0) {
    state_ = STATE_FAILURE;
    MarkAsDoomed();
  } else {
    state_ = STATE_READY;
    UpdateDataFromEntryStat(entry_stat);
  }

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}
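
// Reads keep a running crc32 per stream. crc32_combine() folds the checksum
// of the block just read into the checksum of everything read so far, so a
// sequence of sequential reads that reaches the end of the stream yields the
// full-file checksum, which can then be compared against the EOF record
// without ever re-reading the data.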

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 &&
      crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
    crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
  }

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a crc of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the crc as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.

      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index,
                                data_size_[stream_index],
                                crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }

  if (*result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  if (*result < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  if (*result >= 0)
    RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE);
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
                      CreateNetLogReadWriteCompleteCallback(*result));
  }

  if (*result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::ReadSparseOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<base::Time> last_used,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::WriteSparseOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::GetAvailableRangeOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::DoomOperationComplete(
    const CompletionCallback& callback,
    State state_to_restore,
    int result) {
  state_ = state_to_restore;
  if (!callback.is_null())
    callback.Run(result);
  RunNextOperationIfNeeded();
  if (backend_)
    backend_->OnDoomComplete(entry_hash_);
}

void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (net_log_.IsLogging()) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
        *result);
  }

  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    else
      RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
                      CreateNetLogReadWriteCompleteCallback(*result));
  }

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
  AdjustOpenEntryCountBy(cache_type_, -1);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::UpdateDataFromEntryStat(
    const SimpleEntryStat& entry_stat) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);

  last_used_ = entry_stat.last_used();
  last_modified_ = entry_stat.last_modified();
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    data_size_[i] = entry_stat.data_size(i);
  }
  sparse_data_size_ = entry_stat.sparse_data_size();
  if (!doomed_ && backend_.get())
    backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
}
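
// Disk usage is an estimate: each stream's data size is converted to a file
// size with simple_util::GetFileSizeFromKeyAndDataSize(), which presumably
// accounts for the per-file header (including the key) and EOF record, and
// the sparse data size is added on top.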

int64 SimpleEntryImpl::GetDiskUsage() const {
  int64 file_size = 0;
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    file_size +=
        simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
  }
  file_size += sparse_data_size_;
  return file_size;
}

void SimpleEntryImpl::RecordReadIsParallelizable(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum ReadDependencyType {
    // READ_STANDALONE = 0, Deprecated.
    READ_FOLLOWS_READ = 1,
    READ_FOLLOWS_CONFLICTING_WRITE = 2,
    READ_FOLLOWS_NON_CONFLICTING_WRITE = 3,
    READ_FOLLOWS_OTHER = 4,
    READ_ALONE_IN_QUEUE = 5,
    READ_DEPENDENCY_TYPE_MAX = 6,
  };

  ReadDependencyType type = READ_FOLLOWS_OTHER;
  if (operation.alone_in_queue()) {
    type = READ_ALONE_IN_QUEUE;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
    type = READ_FOLLOWS_READ;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    if (executing_operation_->ConflictsWith(operation))
      type = READ_FOLLOWS_CONFLICTING_WRITE;
    else
      type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadIsParallelizable", cache_type_,
                   type, READ_DEPENDENCY_TYPE_MAX);
}

void SimpleEntryImpl::RecordWriteDependencyType(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum WriteDependencyType {
    WRITE_OPTIMISTIC = 0,
    WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
    WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
    WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
    WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
    WRITE_FOLLOWS_CONFLICTING_READ = 5,
    WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
    WRITE_FOLLOWS_OTHER = 7,
    WRITE_DEPENDENCY_TYPE_MAX = 8,
  };

  WriteDependencyType type = WRITE_FOLLOWS_OTHER;
  if (operation.optimistic()) {
    type = WRITE_OPTIMISTIC;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
             executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    bool conflicting = executing_operation_->ConflictsWith(operation);

    if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
                         : WRITE_FOLLOWS_NON_CONFLICTING_READ;
    } else if (executing_operation_->optimistic()) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
                         : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
    } else {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
                         : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
    }
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteDependencyType", cache_type_,
                   type, WRITE_DEPENDENCY_TYPE_MAX);
}

int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
                                     int offset,
                                     int buf_len) {
  if (buf_len < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
    return 0;
  }
  memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
  UpdateDataFromEntryStat(
      SimpleEntryStat(base::Time::Now(), last_modified_, data_size_,
                      sparse_data_size_));
  RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
  return buf_len;
}
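
// Stream 0 lives entirely in |stream_0_data_| and is only flushed to disk
// when the entry is closed, which is why SetStream0Data() below can complete
// synchronously.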

int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
                                    int offset,
                                    int buf_len,
                                    bool truncate) {
  // Currently, stream 0 is only used for HTTP headers, and always writes them
  // with a single, truncating write. Detect these writes and record the size
  // changes of the headers. Also, support writes to stream 0 that have
  // different access patterns, as required by the API contract.
  // All other clients of the Simple Cache are encouraged to use stream 1.
  have_written_[0] = true;
  int data_size = GetDataSize(0);
  if (offset == 0 && truncate) {
    RecordHeaderSizeChange(cache_type_, data_size, buf_len);
    stream_0_data_->SetCapacity(buf_len);
    memcpy(stream_0_data_->data(), buf->data(), buf_len);
    data_size_[0] = buf_len;
  } else {
    RecordUnexpectedStream0Write(cache_type_);
    const int buffer_size =
        truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
    stream_0_data_->SetCapacity(buffer_size);
    // If |stream_0_data_| was extended, the extension until offset needs to
    // be zero-filled.
    const int fill_size = offset <= data_size ? 0 : offset - data_size;
    if (fill_size > 0)
      memset(stream_0_data_->data() + data_size, 0, fill_size);
    if (buf)
      memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
    data_size_[0] = buffer_size;
  }
  base::Time modification_time = base::Time::Now();
  AdvanceCrc(buf, offset, buf_len, 0);
  UpdateDataFromEntryStat(
      SimpleEntryStat(modification_time, modification_time, data_size_,
                      sparse_data_size_));
  RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  return buf_len;
}
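
// Example of the incremental CRC bookkeeping below: writing [0, 100) and then
// [100, 200) advances crc32s_end_offset_ to 200 with a valid running crc32;
// writing [50, 150) next would rewrite already-hashed bytes, so the end
// offset resets to 0 and the entry's checksum is simply not verified on read.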

void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer,
                                 int offset,
                                 int length,
                                 int stream_index) {
  // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the CRC of the data. When we write to an entry and close without
  // having done a sequential write, we don't check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc =
        (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
    if (length > 0) {
      crc32s_[stream_index] = crc32(
          initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length);
    }
    crc32s_end_offset_[stream_index] = offset + length;
  } else if (offset < crc32s_end_offset_[stream_index]) {
    // If a range for which the crc32 was already computed is rewritten, the
    // computation of the crc32 needs to start from 0 again.
    crc32s_end_offset_[stream_index] = 0;
  }
}

}  // namespace disk_cache