1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/simple/simple_entry_impl.h"
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/callback.h"
14 #include "base/location.h"
15 #include "base/logging.h"
16 #include "base/single_thread_task_runner.h"
17 #include "base/task_runner.h"
18 #include "base/task_runner_util.h"
19 #include "base/thread_task_runner_handle.h"
20 #include "base/time/time.h"
21 #include "net/base/io_buffer.h"
22 #include "net/base/net_errors.h"
23 #include "net/disk_cache/net_log_parameters.h"
24 #include "net/disk_cache/simple/simple_backend_impl.h"
25 #include "net/disk_cache/simple/simple_histogram_macros.h"
26 #include "net/disk_cache/simple/simple_index.h"
27 #include "net/disk_cache/simple/simple_net_log_parameters.h"
28 #include "net/disk_cache/simple/simple_synchronous_entry.h"
29 #include "net/disk_cache/simple/simple_util.h"
30 #include "third_party/zlib/zlib.h"
namespace disk_cache {
namespace {

// An entry can store sparse data taking up to 1 / kMaxSparseDataSizeDivisor of
// its maximum size.
const int64 kMaxSparseDataSizeDivisor = 10;

// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_FAST_EMPTY_RETURN = 5,
  WRITE_RESULT_MAX = 6,
};

// Used in histograms, please only add entries at the end.
enum HeaderSizeChange {
  HEADER_SIZE_CHANGE_INITIAL,
  HEADER_SIZE_CHANGE_SAME,
  HEADER_SIZE_CHANGE_INCREASE,
  HEADER_SIZE_CHANGE_DECREASE,
  HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
  HEADER_SIZE_CHANGE_MAX
};

void RecordReadResult(net::CacheType cache_type, ReadResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadResult", cache_type, result, READ_RESULT_MAX);
}

void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteResult2", cache_type, result, WRITE_RESULT_MAX);
}

// TODO(ttuttle): Consider removing this once we have a good handle on header
// size changes.
void RecordHeaderSizeChange(net::CacheType cache_type,
                            int old_size, int new_size) {
  HeaderSizeChange size_change;

  SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size);

  if (old_size == 0) {
    size_change = HEADER_SIZE_CHANGE_INITIAL;
  } else if (new_size == old_size) {
    size_change = HEADER_SIZE_CHANGE_SAME;
  } else if (new_size > old_size) {
    int delta = new_size - old_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeIncreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeIncreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_INCREASE;
  } else {  // new_size < old_size
    int delta = old_size - new_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeDecreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeDecreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_DECREASE;
  }

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   size_change, HEADER_SIZE_CHANGE_MAX);
}

void RecordUnexpectedStream0Write(net::CacheType cache_type) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, HEADER_SIZE_CHANGE_MAX);
}

int g_open_entry_count = 0;

void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) {
  g_open_entry_count += offset;
  SIMPLE_CACHE_UMA(COUNTS_10000,
                   "GlobalOpenEntryCount", cache_type, g_open_entry_count);
}

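// Runs on the IO thread. Delivers a client callback only while the backend is
// still alive: an invalidated WeakPtr means the backend was destroyed, and the
// callback is intentionally dropped.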
void InvokeCallbackIfBackendIsAlive(
    const base::WeakPtr<SimpleBackendImpl>& backend,
    const net::CompletionCallback& completion_callback,
    int result) {
  DCHECK(!completion_callback.is_null());
  if (!backend.get())
    return;
  completion_callback.Run(result);
}

}  // namespace

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};

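// Illustrative use of ScopedOperationRunner (the *Internal() methods below
// follow this pattern; SomeOperationInternal is a made-up name):
//
//   void SimpleEntryImpl::SomeOperationInternal() {
//     ScopedOperationRunner operation_runner(this);
//     ...  // Any early return still pumps |pending_operations_|.
//   }
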
SimpleEntryImpl::ActiveEntryProxy::~ActiveEntryProxy() {}

SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
                                 const FilePath& path,
                                 const uint64 entry_hash,
                                 OperationsMode operations_mode,
                                 SimpleBackendImpl* backend,
                                 net::NetLog* net_log)
    : backend_(backend->AsWeakPtr()),
      cache_type_(cache_type),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      sparse_data_size_(0),
      open_count_(0),
      doomed_(false),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL),
      net_log_(net::BoundNetLog::Make(
          net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)),
      stream_0_data_(new net::GrowableIOBuffer()) {
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
                 arrays_should_be_same_size);
  MakeUninitialized();
  net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
                      CreateNetLogSimpleEntryConstructionCallback(this));
}

void SimpleEntryImpl::SetActiveEntryProxy(
    scoped_ptr<ActiveEntryProxy> active_entry_proxy) {
  DCHECK(!active_entry_proxy_);
  active_entry_proxy_.reset(active_entry_proxy.release());
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);

  bool have_index = backend_->index()->initialized();
  // This enumeration is used in histograms, add entries only at end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (backend_->index()->Has(entry_hash_))
    open_entry_index_enum = INDEX_HIT;
  else
    open_entry_index_enum = INDEX_MISS;
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "OpenEntryIndexState", cache_type_,
                   open_entry_index_enum, INDEX_MAX);

  // If entry is not known to the index, initiate fast failover to the network.
  if (open_entry_index_enum == INDEX_MISS) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        net::ERR_FAILED);
    return net::ERR_FAILED;
  }

  pending_operations_.push(SimpleEntryOperation::OpenOperation(
      this, have_index, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);

  bool have_index = backend_->index()->initialized();
  int ret_value = net::ERR_FAILED;
  if (use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);

    ReturnEntryToCaller(out_entry);
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, callback, out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry, because this way the worst-case scenario is having
  // the entry in the index without the created files yet, so we never leak
  // files. CreationOperationComplete will remove the entry from the index if
  // the creation fails.
  backend_->index()->Insert(entry_hash_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);

  MarkAsDoomed();
  if (backend_.get())
    backend_->OnDoomStart(entry_hash_);
  pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::SetKey(const std::string& key) {
  key_ = key;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
                    net::NetLog::StringCallback("key", &key));
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
                      CreateNetLogReadWriteDataCallback(stream_index, offset,
                                                        buf_len, false));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      buf_len < 0) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
                                      offset < 0 || !buf_len)) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
                        CreateNetLogReadWriteCompleteCallback(0));
    }
    RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(clamy): return immediately when reading from stream 0.

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  bool alone_in_queue =
      pending_operations_.size() == 0 && state_ == STATE_READY;
  pending_operations_.push(SimpleEntryOperation::ReadOperation(
      this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
      offset < 0 || buf_len < 0) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }

  ScopedOperationRunner operation_runner(this);

  // Stream 0 data is kept in memory, so it can be written immediately if there
  // are no IO operations pending.
  if (stream_index == 0 && state_ == STATE_READY &&
      pending_operations_.size() == 0)
    return SetStream0Data(buf, offset, buf_len, truncate);

  // We can only do optimistic Write if there are no pending operations, so
  // that we are sure that the next call to RunNextOperationIfNeeded will
  // actually run the write operation that sets the stream size. It also
  // protects us from previous possibly-conflicting writes that could be
  // stacked in |pending_operations_|. We could optimize this for when we have
  // only read operations enqueued.
  const bool optimistic =
      (use_optimistic_operations_ && state_ == STATE_READY &&
       pending_operations_.size() == 0);
  CompletionCallback op_callback;
  scoped_refptr<net::IOBuffer> op_buf;
  int ret_value = net::ERR_FAILED;
  if (!optimistic) {
    op_buf = buf;
    op_callback = callback;
    ret_value = net::ERR_IO_PENDING;
  } else {
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
    if (buf) {
      op_buf = new IOBuffer(buf_len);
      memcpy(op_buf->data(), buf->data(), buf_len);
    }
    op_callback = CompletionCallback();
    ret_value = buf_len;
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
          CreateNetLogReadWriteCompleteCallback(buf_len));
    }
  }

  pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
                                                                stream_index,
                                                                offset,
                                                                buf_len,
                                                                op_buf.get(),
                                                                truncate,
                                                                optimistic,
                                                                op_callback));
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::ReadSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::WriteSparseOperation(
      this, offset, buf_len, buf, callback));
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  ScopedOperationRunner operation_runner(this);
  pending_operations_.push(SimpleEntryOperation::GetAvailableRangeOperation(
      this, offset, len, start, callback));
  return net::ERR_IO_PENDING;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(ttuttle): Actually check.
  return true;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // The Simple Cache does not return distinct objects for the same non-doomed
  // entry, so there's no need to coordinate which object is performing sparse
  // I/O. Therefore, CancelSparseIO and ReadyForSparseIO succeed instantly.
  return net::OK;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
}

void SimpleEntryImpl::PostClientCallback(const CompletionCallback& callback,
                                         int result) {
  if (callback.is_null())
    return;
  // Note that the callback is posted rather than directly invoked to avoid
  // reentrancy issues.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&InvokeCallbackIfBackendIsAlive, backend_, callback, result));
}

void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
    crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
  }
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close()
  if (!backend_.get()) {
    // This method can be called when an asynchronous operation completed.
    // If the backend no longer exists, the callback won't be invoked, and so we
    // must close ourselves to avoid leaking. As well, there's no guarantee the
    // client-provided pointer (|out_entry|) hasn't been freed, and no point
    // dereferencing it, either.
    Close();
    return;
  }
  *out_entry = this;
}

void SimpleEntryImpl::MarkAsDoomed() {
  doomed_ = true;
  if (!backend_.get())
    return;
  backend_->index()->Remove(entry_hash_);
  active_entry_proxy_.reset();
}

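// Dispatcher for the serialized operation queue: unless an operation is
// already in flight (STATE_IO_PENDING), pops the front of
// |pending_operations_| and routes it to the matching *Internal() method.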
void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "EntryOperationsPending", cache_type_,
                   pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    scoped_ptr<SimpleEntryOperation> operation(
        new SimpleEntryOperation(pending_operations_.front()));
    pending_operations_.pop();
    switch (operation->type()) {
      case SimpleEntryOperation::TYPE_OPEN:
        OpenEntryInternal(operation->have_index(),
                          operation->callback(),
                          operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CREATE:
        CreateEntryInternal(operation->have_index(),
                            operation->callback(),
                            operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CLOSE:
        CloseInternal();
        break;
      case SimpleEntryOperation::TYPE_READ:
        RecordReadIsParallelizable(*operation);
        ReadDataInternal(operation->index(),
                         operation->offset(),
                         operation->buf(),
                         operation->length(),
                         operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
        RecordWriteDependencyType(*operation);
        WriteDataInternal(operation->index(),
                          operation->offset(),
                          operation->buf(),
                          operation->length(),
                          operation->callback(),
                          operation->truncate());
        break;
      case SimpleEntryOperation::TYPE_READ_SPARSE:
        ReadSparseDataInternal(operation->sparse_offset(),
                               operation->buf(),
                               operation->length(),
                               operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE_SPARSE:
        WriteSparseDataInternal(operation->sparse_offset(),
                                operation->buf(),
                                operation->length(),
                                operation->callback());
        break;
      case SimpleEntryOperation::TYPE_GET_AVAILABLE_RANGE:
        GetAvailableRangeInternal(operation->sparse_offset(),
                                  operation->length(),
                                  operation->out_start(),
                                  operation->callback());
        break;
      case SimpleEntryOperation::TYPE_DOOM:
        DoomEntryInternal(operation->callback());
        break;
      default:
        NOTREACHED();
    }
    // The operation is kept for histograms. Make sure it does not leak
    // resources.
    executing_operation_.swap(operation);
    executing_operation_->ReleaseReferences();
    // |this| may have been deleted.
  }
}

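// The actual file open happens on |worker_pool_| in
// SimpleSynchronousEntry::OpenEntry(); CreationOperationComplete() rejoins on
// the IO thread with the results.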
void SimpleEntryImpl::OpenEntryInternal(bool have_index,
                                        const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);

  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    PostClientCallback(callback, net::OK);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::OK));
    return;
  }
  if (state_ == STATE_FAILURE) {
    PostClientCallback(callback, net::ERR_FAILED);
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
                            cache_type_,
                            path_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreateEntryInternal(bool have_index,
                                          const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);

  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    PostClientCallback(callback, net::ERR_FAILED);
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryStreamCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
                            cache_type_,
                            path_,
                            key_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

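// On close, a stream whose contents were fully covered by the running
// checksum (GetDataSize(i) == crc32s_end_offset_[i]) gets its CRC written
// into the EOF record; otherwise the record is flagged as having no usable
// CRC and verification is skipped after the next open.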
void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task =
        base::Bind(&SimpleSynchronousEntry::Close,
                   base::Unretained(synchronous_entry_),
                   SimpleEntryStat(last_used_, last_modified_, data_size_,
                                   sparse_data_size_),
                   base::Passed(&crc32s_to_write),
                   stream_0_data_);
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
      if (!have_written_[i]) {
        SIMPLE_CACHE_UMA(ENUMERATION,
                         "CheckCRCResult", cache_type_,
                         crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(cache_type_, READ_RESULT_BAD_STATE);
      // Note that the API states that client-provided callbacks for entry-level
      // (i.e. non-backend) operations (e.g. read, write) are invoked even if
      // the backend was already destroyed.
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                    base::Bind(callback, 0));
    return;
  }

  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  // Since stream 0 data is kept in memory, it is read immediately.
  if (stream_index == 0) {
    int ret_value = ReadStream0Data(buf, offset, buf_len);
    if (!callback.is_null()) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, ret_value));
    }
    return;
  }

  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  Closure task = base::Bind(
      &SimpleSynchronousEntry::ReadData,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
      make_scoped_refptr(buf),
      read_crc32.get(),
      result.get(),
      entry_stat.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
                             this,
                             stream_index,
                             offset,
                             callback,
                             base::Passed(&read_crc32),
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

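// Note the empty-write subtlety handled below: a zero-length write is a no-op
// only when it cannot change the stream size, i.e. offset == size for a
// truncating write and offset <= size otherwise.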
void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE);
    if (net_log_.IsLogging()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    if (!callback.is_null()) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);

  // Since stream 0 data is kept in memory, it will be written immediately.
  if (stream_index == 0) {
    int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
    if (!callback.is_null()) {
      base::ThreadTaskRunnerHandle::Get()->PostTask(
          FROM_HERE, base::Bind(callback, ret_value));
    }
    return;
  }

  // Ignore zero-length writes that do not change the file size.
  if (buf_len == 0) {
    int32 data_size = data_size_[stream_index];
    if (truncate ? (offset == data_size) : (offset <= data_size)) {
      RecordWriteResult(cache_type_, WRITE_RESULT_FAST_EMPTY_RETURN);
      if (!callback.is_null()) {
        base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
                                                      base::Bind(callback, 0));
      }
      return;
    }
  }
  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  AdvanceCrc(buf, offset, buf_len, stream_index);

  // |entry_stat| needs to be initialized before modifying |data_size_|.
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));
  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;
  // Writing on stream 1 affects the placement of stream 0 in the file; the EOF
  // record will have to be rewritten.
  if (stream_index == 1)
    have_written_[0] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                stream_index, offset, buf_len, truncate,
                                doomed_),
                            make_scoped_refptr(buf),
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
                             this,
                             stream_index,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::ReadSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  scoped_ptr<base::Time> last_used(new base::Time());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            last_used.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&last_used),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteSparseDataInternal(
    int64 sparse_offset,
    net::IOBuffer* buf,
    int buf_len,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  int64 max_sparse_data_size = kint64max;
  if (backend_.get()) {
    int64 max_cache_size = backend_->index()->max_size();
    max_sparse_data_size = max_cache_size / kMaxSparseDataSizeDivisor;
  }

  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_,
                          sparse_data_size_));

  last_used_ = last_modified_ = base::Time::Now();

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteSparseData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, buf_len),
                            make_scoped_refptr(buf),
                            max_sparse_data_size,
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteSparseOperationComplete,
                             this,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::GetAvailableRangeInternal(
    int64 sparse_offset,
    int len,
    int64* out_start,
    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::GetAvailableRange,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                sparse_offset, len),
                            out_start,
                            result.get());
  Closure reply = base::Bind(
      &SimpleEntryImpl::GetAvailableRangeOperationComplete,
      this,
      callback,
      base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

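// The file deletion runs on the worker pool while this entry's queue is held
// in STATE_IO_PENDING; DoomOperationComplete() restores the previous state,
// which is captured here by binding the current |state_|.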
void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
  PostTaskAndReplyWithResult(
      worker_pool_.get(),
      FROM_HERE,
      base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, entry_hash_),
      base::Bind(
          &SimpleEntryImpl::DoomOperationComplete, this, callback, state_));
  state_ = STATE_IO_PENDING;
}

void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleEntryCreationResults> in_results,
    Entry** out_entry,
    net::NetLog::EventType end_event_type) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_results);
  ScopedOperationRunner operation_runner(this);
  SIMPLE_CACHE_UMA(BOOLEAN,
                   "EntryCreationResult", cache_type_,
                   in_results->result == net::OK);
  if (in_results->result != net::OK) {
    if (in_results->result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();

    net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);
    PostClientCallback(completion_callback, net::ERR_FAILED);
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = in_results->sync_entry;
  if (in_results->stream_0_data.get()) {
    stream_0_data_ = in_results->stream_0_data;
    // The crc was read in SimpleSynchronousEntry.
    crc_check_state_[0] = CRC_CHECK_DONE;
    crc32s_[0] = in_results->stream_0_crc32;
    crc32s_end_offset_[0] = in_results->entry_stat.data_size(0);
  }
  if (key_.empty()) {
    SetKey(synchronous_entry_->key());
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  UpdateDataFromEntryStat(in_results->entry_stat);
  SIMPLE_CACHE_UMA(TIMES,
                   "EntryCreationTime", cache_type_,
                   (base::TimeTicks::Now() - start_time));
  AdjustOpenEntryCountBy(cache_type_, 1);

  net_log_.AddEvent(end_event_type);
  PostClientCallback(completion_callback, net::OK);
}

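// Shared completion tail for the I/O operations above: a negative result
// moves the entry to STATE_FAILURE and dooms it, success restores
// STATE_READY; either way the client callback is posted and the queue is
// pumped.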
void SimpleEntryImpl::EntryOperationComplete(
    const CompletionCallback& completion_callback,
    const SimpleEntryStat& entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  if (*result < 0) {
    state_ = STATE_FAILURE;
    MarkAsDoomed();
  } else {
    state_ = STATE_READY;
    UpdateDataFromEntryStat(entry_stat);
  }

  if (!completion_callback.is_null()) {
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::Bind(completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

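// Besides finishing the read, this advances the incremental checksum: when a
// read continues exactly at |crc32s_end_offset_|, the prefix CRC and the CRC
// of the block just read are merged with zlib's crc32_combine(), whose third
// argument is the length of the second block.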
void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (*result > 0 &&
      crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
    crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
  }

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a crc of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the crc as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index,
                                *entry_stat,
                                crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }

  if (*result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  if (*result < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  if (*result >= 0)
    RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE);
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
                      CreateNetLogReadWriteCompleteCallback(*result));
  }

  if (*result < 0) {
    crc32s_end_offset_[stream_index] = 0;
  }

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::ReadSparseOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<base::Time> last_used,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(*last_used, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::WriteSparseOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  EntryOperationComplete(completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::GetAvailableRangeOperationComplete(
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK(result);

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::DoomOperationComplete(
    const CompletionCallback& callback,
    State state_to_restore,
    int result) {
  state_ = state_to_restore;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_END);
  if (!callback.is_null())
    callback.Run(result);
  RunNextOperationIfNeeded();
  if (backend_)
    backend_->OnDoomComplete(entry_hash_);
}

void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (net_log_.IsLogging()) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
        *result);
  }

  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    else
      RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  if (net_log_.IsLogging()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
                      CreateNetLogReadWriteCompleteCallback(*result));
  }

  SimpleEntryStat entry_stat(last_used_, last_modified_, data_size_,
                             sparse_data_size_);
  EntryOperationComplete(completion_callback, entry_stat, result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
  AdjustOpenEntryCountBy(cache_type_, -1);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::UpdateDataFromEntryStat(
    const SimpleEntryStat& entry_stat) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);

  last_used_ = entry_stat.last_used();
  last_modified_ = entry_stat.last_modified();
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    data_size_[i] = entry_stat.data_size(i);
  }
  sparse_data_size_ = entry_stat.sparse_data_size();
  if (!doomed_ && backend_.get())
    backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
}

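// Total disk footprint of this entry: the per-stream file sizes implied by
// the key and data sizes (assumed to include per-file header/EOF overhead),
// plus the sparse data size.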
int64 SimpleEntryImpl::GetDiskUsage() const {
  int64 file_size = 0;
  for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
    file_size +=
        simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
  }
  file_size += sparse_data_size_;
  return file_size;
}

void SimpleEntryImpl::RecordReadIsParallelizable(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum ReadDependencyType {
    // READ_STANDALONE = 0, Deprecated.
    READ_FOLLOWS_READ = 1,
    READ_FOLLOWS_CONFLICTING_WRITE = 2,
    READ_FOLLOWS_NON_CONFLICTING_WRITE = 3,
    READ_FOLLOWS_OTHER = 4,
    READ_ALONE_IN_QUEUE = 5,
    READ_DEPENDENCY_TYPE_MAX = 6,
  };

  ReadDependencyType type = READ_FOLLOWS_OTHER;
  if (operation.alone_in_queue()) {
    type = READ_ALONE_IN_QUEUE;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
    type = READ_FOLLOWS_READ;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    if (executing_operation_->ConflictsWith(operation))
      type = READ_FOLLOWS_CONFLICTING_WRITE;
    else
      type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadIsParallelizable", cache_type_,
                   type, READ_DEPENDENCY_TYPE_MAX);
}

void SimpleEntryImpl::RecordWriteDependencyType(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum WriteDependencyType {
    WRITE_OPTIMISTIC = 0,
    WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
    WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
    WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
    WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
    WRITE_FOLLOWS_CONFLICTING_READ = 5,
    WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
    WRITE_FOLLOWS_OTHER = 7,
    WRITE_DEPENDENCY_TYPE_MAX = 8,
  };

  WriteDependencyType type = WRITE_FOLLOWS_OTHER;
  if (operation.optimistic()) {
    type = WRITE_OPTIMISTIC;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
             executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    bool conflicting = executing_operation_->ConflictsWith(operation);

    if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
                         : WRITE_FOLLOWS_NON_CONFLICTING_READ;
    } else if (executing_operation_->optimistic()) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
                         : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
    } else {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
                         : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
    }
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteDependencyType", cache_type_,
                   type, WRITE_DEPENDENCY_TYPE_MAX);
}

int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
                                     int offset,
                                     int buf_len) {
  if (buf_len < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
    return 0;
  }
  memcpy(buf->data(), stream_0_data_->data() + offset, buf_len);
  UpdateDataFromEntryStat(
      SimpleEntryStat(base::Time::Now(), last_modified_, data_size_,
                      sparse_data_size_));
  RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
  return buf_len;
}

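// Stream 0 lives entirely in |stream_0_data_| and is only flushed to disk
// together with the EOF record when the entry is closed (see CloseInternal()),
// which is why reads and writes of it complete synchronously.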
int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
                                    int offset,
                                    int buf_len,
                                    bool truncate) {
  // Currently, stream 0 is only used for HTTP headers, and always writes them
  // with a single, truncating write. Detect these writes and record the size
  // changes of the headers. Also, support writes to stream 0 that have
  // different access patterns, as required by the API contract.
  // All other clients of the Simple Cache are encouraged to use stream 1.
  have_written_[0] = true;
  int data_size = GetDataSize(0);
  if (offset == 0 && truncate) {
    RecordHeaderSizeChange(cache_type_, data_size, buf_len);
    stream_0_data_->SetCapacity(buf_len);
    memcpy(stream_0_data_->data(), buf->data(), buf_len);
    data_size_[0] = buf_len;
  } else {
    RecordUnexpectedStream0Write(cache_type_);
    const int buffer_size =
        truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
    stream_0_data_->SetCapacity(buffer_size);
    // If |stream_0_data_| was extended, the extension until offset needs to be
    // zeroed.
    const int fill_size = offset <= data_size ? 0 : offset - data_size;
    if (fill_size > 0)
      memset(stream_0_data_->data() + data_size, 0, fill_size);
    if (buf)
      memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
    data_size_[0] = buffer_size;
  }
  base::Time modification_time = base::Time::Now();
  AdvanceCrc(buf, offset, buf_len, 0);
  UpdateDataFromEntryStat(
      SimpleEntryStat(modification_time, modification_time, data_size_,
                      sparse_data_size_));
  RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  return buf_len;
}

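// A minimal sketch of the incremental checksum maintained below, using only
// zlib calls already used in this file:
//
//   uint32 crc = crc32(0, Z_NULL, 0);  // CRC of the empty prefix.
//   crc = crc32(crc, chunk_a, len_a);  // Now covers [0, len_a).
//   crc = crc32(crc, chunk_b, len_b);  // Now covers [0, len_a + len_b).
//
// A write that is not contiguous with the covered prefix cannot extend it, so
// the running value is discarded.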
void SimpleEntryImpl::AdvanceCrc(net::IOBuffer* buffer,
                                 int offset,
                                 int length,
                                 int stream_index) {
  // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the crc of the data. When we write to an entry and close without
  // having done a sequential write, we don't check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc =
        (offset != 0) ? crc32s_[stream_index] : crc32(0, Z_NULL, 0);
    if (length > 0) {
      crc32s_[stream_index] = crc32(
          initial_crc, reinterpret_cast<const Bytef*>(buffer->data()), length);
    }
    crc32s_end_offset_[stream_index] = offset + length;
  } else if (offset < crc32s_end_offset_[stream_index]) {
    // If a range for which the crc32 was already computed is rewritten, the
    // computation of the crc32 needs to start from 0 again.
    crc32s_end_offset_[stream_index] = 0;
  }
}

}  // namespace disk_cache