1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/disk_cache/simple/simple_entry_impl.h"
11 #include "base/bind.h"
12 #include "base/bind_helpers.h"
13 #include "base/callback.h"
14 #include "base/location.h"
15 #include "base/logging.h"
16 #include "base/message_loop/message_loop_proxy.h"
17 #include "base/metrics/histogram.h"
18 #include "base/task_runner.h"
19 #include "base/time/time.h"
20 #include "net/base/io_buffer.h"
21 #include "net/base/net_errors.h"
22 #include "net/disk_cache/net_log_parameters.h"
23 #include "net/disk_cache/simple/simple_backend_impl.h"
24 #include "net/disk_cache/simple/simple_index.h"
25 #include "net/disk_cache/simple/simple_net_log_parameters.h"
26 #include "net/disk_cache/simple/simple_synchronous_entry.h"
27 #include "net/disk_cache/simple/simple_util.h"
28 #include "third_party/zlib/zlib.h"
// Used in histograms, please only add entries at the end.
//
// Outcome of a ReadData() call, recorded by RecordReadResult().
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  // Must stay last: used as the exclusive histogram bound.
  READ_RESULT_MAX = 7,
};
// Used in histograms, please only add entries at the end.
//
// Outcome of a WriteData() call, recorded by RecordWriteResult().
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  // Must stay last: used as the exclusive histogram bound.
  WRITE_RESULT_MAX = 5,
};
// Used in histograms, please only add entries at the end.
//
// Classification of a stream 0 (HTTP header) write relative to the
// previously recorded header size. See RecordHeaderSizeChange().
enum HeaderSizeChange {
  HEADER_SIZE_CHANGE_INITIAL,
  HEADER_SIZE_CHANGE_SAME,
  HEADER_SIZE_CHANGE_INCREASE,
  HEADER_SIZE_CHANGE_DECREASE,
  HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
  // Must stay last: used as the exclusive histogram bound.
  HEADER_SIZE_CHANGE_MAX,
};
64 void RecordReadResult(ReadResult result
) {
65 UMA_HISTOGRAM_ENUMERATION("SimpleCache.ReadResult", result
, READ_RESULT_MAX
);
68 void RecordWriteResult(WriteResult result
) {
69 UMA_HISTOGRAM_ENUMERATION("SimpleCache.WriteResult",
70 result
, WRITE_RESULT_MAX
);
73 // TODO(ttuttle): Consider removing this once we have a good handle on header
75 void RecordHeaderSizeChange(int old_size
, int new_size
) {
76 HeaderSizeChange size_change
;
78 UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSize", new_size
);
81 size_change
= HEADER_SIZE_CHANGE_INITIAL
;
82 } else if (new_size
== old_size
) {
83 size_change
= HEADER_SIZE_CHANGE_SAME
;
84 } else if (new_size
> old_size
) {
85 int delta
= new_size
- old_size
;
86 UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSizeIncreaseAbsolute",
88 UMA_HISTOGRAM_PERCENTAGE("SimpleCache.HeaderSizeIncreasePercentage",
89 delta
* 100 / old_size
);
90 size_change
= HEADER_SIZE_CHANGE_INCREASE
;
91 } else { // new_size < old_size
92 int delta
= old_size
- new_size
;
93 UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSizeDecreaseAbsolute",
95 UMA_HISTOGRAM_PERCENTAGE("SimpleCache.HeaderSizeDecreasePercentage",
96 delta
* 100 / old_size
);
97 size_change
= HEADER_SIZE_CHANGE_DECREASE
;
100 UMA_HISTOGRAM_ENUMERATION("SimpleCache.HeaderSizeChange",
102 HEADER_SIZE_CHANGE_MAX
);
105 void RecordUnexpectedStream0Write() {
106 UMA_HISTOGRAM_ENUMERATION("SimpleCache.HeaderSizeChange",
107 HEADER_SIZE_CHANGE_UNEXPECTED_WRITE
,
108 HEADER_SIZE_CHANGE_MAX
);
111 // Short trampoline to take an owned input parameter and call a net completion
112 // callback with its value.
113 void CallCompletionCallback(const net::CompletionCallback
& callback
,
114 scoped_ptr
<int> result
) {
116 if (!callback
.is_null())
117 callback
.Run(*result
);
120 int g_open_entry_count
= 0;
122 void AdjustOpenEntryCountBy(int offset
) {
123 g_open_entry_count
+= offset
;
124 UMA_HISTOGRAM_COUNTS_10000("SimpleCache.GlobalOpenEntryCount",
130 namespace disk_cache
{
133 using base::FilePath
;
134 using base::MessageLoopProxy
;
136 using base::TaskRunner
;
138 // A helper class to insure that RunNextOperationIfNeeded() is called when
139 // exiting the current stack frame.
140 class SimpleEntryImpl::ScopedOperationRunner
{
142 explicit ScopedOperationRunner(SimpleEntryImpl
* entry
) : entry_(entry
) {
145 ~ScopedOperationRunner() {
146 entry_
->RunNextOperationIfNeeded();
150 SimpleEntryImpl
* const entry_
;
153 SimpleEntryImpl::SimpleEntryImpl(const FilePath
& path
,
154 const uint64 entry_hash
,
155 OperationsMode operations_mode
,
156 SimpleBackendImpl
* backend
,
157 net::NetLog
* net_log
)
158 : backend_(backend
->AsWeakPtr()),
159 worker_pool_(backend
->worker_pool()),
161 entry_hash_(entry_hash
),
162 use_optimistic_operations_(operations_mode
== OPTIMISTIC_OPERATIONS
),
163 last_used_(Time::Now()),
164 last_modified_(last_used_
),
166 state_(STATE_UNINITIALIZED
),
167 synchronous_entry_(NULL
),
168 net_log_(net::BoundNetLog::Make(
169 net_log
, net::NetLog::SOURCE_DISK_CACHE_ENTRY
)) {
170 COMPILE_ASSERT(arraysize(data_size_
) == arraysize(crc32s_end_offset_
),
171 arrays_should_be_same_size
);
172 COMPILE_ASSERT(arraysize(data_size_
) == arraysize(crc32s_
),
173 arrays_should_be_same_size
);
174 COMPILE_ASSERT(arraysize(data_size_
) == arraysize(have_written_
),
175 arrays_should_be_same_size
);
176 COMPILE_ASSERT(arraysize(data_size_
) == arraysize(crc_check_state_
),
177 arrays_should_be_same_size
);
179 net_log_
.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY
,
180 CreateNetLogSimpleEntryConstructionCallback(this));
183 int SimpleEntryImpl::OpenEntry(Entry
** out_entry
,
184 const CompletionCallback
& callback
) {
185 DCHECK(backend_
.get());
187 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL
);
189 bool have_index
= backend_
->index()->initialized();
190 // This enumeration is used in histograms, add entries only at end.
191 enum OpenEntryIndexEnum
{
197 OpenEntryIndexEnum open_entry_index_enum
= INDEX_NOEXIST
;
199 if (backend_
->index()->Has(entry_hash_
))
200 open_entry_index_enum
= INDEX_HIT
;
202 open_entry_index_enum
= INDEX_MISS
;
204 UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
205 open_entry_index_enum
, INDEX_MAX
);
207 // If entry is not known to the index, initiate fast failover to the network.
208 if (open_entry_index_enum
== INDEX_MISS
) {
209 net_log_
.AddEventWithNetErrorCode(
210 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END
,
212 return net::ERR_FAILED
;
215 pending_operations_
.push(SimpleEntryOperation::OpenOperation(
216 this, have_index
, callback
, out_entry
));
217 RunNextOperationIfNeeded();
218 return net::ERR_IO_PENDING
;
221 int SimpleEntryImpl::CreateEntry(Entry
** out_entry
,
222 const CompletionCallback
& callback
) {
223 DCHECK(backend_
.get());
224 DCHECK_EQ(entry_hash_
, simple_util::GetEntryHashKey(key_
));
226 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL
);
228 bool have_index
= backend_
->index()->initialized();
229 int ret_value
= net::ERR_FAILED
;
230 if (use_optimistic_operations_
&&
231 state_
== STATE_UNINITIALIZED
&& pending_operations_
.size() == 0) {
232 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC
);
234 ReturnEntryToCaller(out_entry
);
235 pending_operations_
.push(SimpleEntryOperation::CreateOperation(
236 this, have_index
, CompletionCallback(), static_cast<Entry
**>(NULL
)));
239 pending_operations_
.push(SimpleEntryOperation::CreateOperation(
240 this, have_index
, callback
, out_entry
));
241 ret_value
= net::ERR_IO_PENDING
;
244 // We insert the entry in the index before creating the entry files in the
245 // SimpleSynchronousEntry, because this way the worst scenario is when we
246 // have the entry in the index but we don't have the created files yet, this
247 // way we never leak files. CreationOperationComplete will remove the entry
248 // from the index if the creation fails.
249 backend_
->index()->Insert(key_
);
251 RunNextOperationIfNeeded();
255 int SimpleEntryImpl::DoomEntry(const CompletionCallback
& callback
) {
256 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL
);
257 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN
);
260 scoped_ptr
<int> result(new int());
261 Closure task
= base::Bind(&SimpleSynchronousEntry::DoomEntry
, path_
, key_
,
262 entry_hash_
, result
.get());
263 Closure reply
= base::Bind(&CallCompletionCallback
,
264 callback
, base::Passed(&result
));
265 worker_pool_
->PostTaskAndReply(FROM_HERE
, task
, reply
);
266 return net::ERR_IO_PENDING
;
269 void SimpleEntryImpl::SetKey(const std::string
& key
) {
271 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY
,
272 net::NetLog::StringCallback("key", &key
));
275 void SimpleEntryImpl::Doom() {
276 DoomEntry(CompletionCallback());
279 void SimpleEntryImpl::Close() {
280 DCHECK(io_thread_checker_
.CalledOnValidThread());
281 DCHECK_LT(0, open_count_
);
283 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL
);
285 if (--open_count_
> 0) {
286 DCHECK(!HasOneRef());
287 Release(); // Balanced in ReturnEntryToCaller().
291 pending_operations_
.push(SimpleEntryOperation::CloseOperation(this));
292 DCHECK(!HasOneRef());
293 Release(); // Balanced in ReturnEntryToCaller().
294 RunNextOperationIfNeeded();
297 std::string
SimpleEntryImpl::GetKey() const {
298 DCHECK(io_thread_checker_
.CalledOnValidThread());
302 Time
SimpleEntryImpl::GetLastUsed() const {
303 DCHECK(io_thread_checker_
.CalledOnValidThread());
307 Time
SimpleEntryImpl::GetLastModified() const {
308 DCHECK(io_thread_checker_
.CalledOnValidThread());
309 return last_modified_
;
312 int32
SimpleEntryImpl::GetDataSize(int stream_index
) const {
313 DCHECK(io_thread_checker_
.CalledOnValidThread());
314 DCHECK_LE(0, data_size_
[stream_index
]);
315 return data_size_
[stream_index
];
318 int SimpleEntryImpl::ReadData(int stream_index
,
322 const CompletionCallback
& callback
) {
323 DCHECK(io_thread_checker_
.CalledOnValidThread());
325 if (net_log_
.IsLoggingAllEvents()) {
326 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL
,
327 CreateNetLogReadWriteDataCallback(stream_index
, offset
, buf_len
,
331 if (stream_index
< 0 || stream_index
>= kSimpleEntryFileCount
||
333 if (net_log_
.IsLoggingAllEvents()) {
334 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END
,
335 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT
));
338 RecordReadResult(READ_RESULT_INVALID_ARGUMENT
);
339 return net::ERR_INVALID_ARGUMENT
;
341 if (pending_operations_
.empty() && (offset
>= GetDataSize(stream_index
) ||
342 offset
< 0 || !buf_len
)) {
343 if (net_log_
.IsLoggingAllEvents()) {
344 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END
,
345 CreateNetLogReadWriteCompleteCallback(0));
348 RecordReadResult(READ_RESULT_NONBLOCK_EMPTY_RETURN
);
352 // TODO(felipeg): Optimization: Add support for truly parallel read
354 bool alone_in_queue
=
355 pending_operations_
.size() == 0 && state_
== STATE_READY
;
356 pending_operations_
.push(SimpleEntryOperation::ReadOperation(
357 this, stream_index
, offset
, buf_len
, buf
, callback
, alone_in_queue
));
358 RunNextOperationIfNeeded();
359 return net::ERR_IO_PENDING
;
362 int SimpleEntryImpl::WriteData(int stream_index
,
366 const CompletionCallback
& callback
,
368 DCHECK(io_thread_checker_
.CalledOnValidThread());
370 if (net_log_
.IsLoggingAllEvents()) {
372 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL
,
373 CreateNetLogReadWriteDataCallback(stream_index
, offset
, buf_len
,
377 if (stream_index
< 0 || stream_index
>= kSimpleEntryFileCount
|| offset
< 0 ||
379 if (net_log_
.IsLoggingAllEvents()) {
381 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END
,
382 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT
));
384 RecordWriteResult(WRITE_RESULT_INVALID_ARGUMENT
);
385 return net::ERR_INVALID_ARGUMENT
;
387 if (backend_
.get() && offset
+ buf_len
> backend_
->GetMaxFileSize()) {
388 if (net_log_
.IsLoggingAllEvents()) {
390 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END
,
391 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED
));
393 RecordWriteResult(WRITE_RESULT_OVER_MAX_SIZE
);
394 return net::ERR_FAILED
;
396 ScopedOperationRunner
operation_runner(this);
398 // Currently, Simple Cache is only used for HTTP, which stores the headers in
399 // stream 0 and always writes them with a single, truncating write. Detect
400 // these writes and record the size and size changes of the headers. Also,
401 // note writes to stream 0 that violate those assumptions.
402 if (stream_index
== 0) {
403 if (offset
== 0 && truncate
)
404 RecordHeaderSizeChange(data_size_
[0], buf_len
);
406 RecordUnexpectedStream0Write();
409 // We can only do optimistic Write if there is no pending operations, so
410 // that we are sure that the next call to RunNextOperationIfNeeded will
411 // actually run the write operation that sets the stream size. It also
412 // prevents from previous possibly-conflicting writes that could be stacked
413 // in the |pending_operations_|. We could optimize this for when we have
414 // only read operations enqueued.
415 const bool optimistic
=
416 (use_optimistic_operations_
&& state_
== STATE_READY
&&
417 pending_operations_
.size() == 0);
418 CompletionCallback op_callback
;
419 scoped_refptr
<net::IOBuffer
> op_buf
;
420 int ret_value
= net::ERR_FAILED
;
423 op_callback
= callback
;
424 ret_value
= net::ERR_IO_PENDING
;
426 // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
427 // here to avoid paying the price of the RefCountedThreadSafe atomic
430 op_buf
= new IOBuffer(buf_len
);
431 memcpy(op_buf
->data(), buf
->data(), buf_len
);
433 op_callback
= CompletionCallback();
435 if (net_log_
.IsLoggingAllEvents()) {
437 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC
,
438 CreateNetLogReadWriteCompleteCallback(buf_len
));
442 pending_operations_
.push(SimpleEntryOperation::WriteOperation(this,
453 int SimpleEntryImpl::ReadSparseData(int64 offset
,
456 const CompletionCallback
& callback
) {
457 DCHECK(io_thread_checker_
.CalledOnValidThread());
458 // TODO(gavinp): Determine if the simple backend should support sparse data.
460 return net::ERR_FAILED
;
463 int SimpleEntryImpl::WriteSparseData(int64 offset
,
466 const CompletionCallback
& callback
) {
467 DCHECK(io_thread_checker_
.CalledOnValidThread());
468 // TODO(gavinp): Determine if the simple backend should support sparse data.
470 return net::ERR_FAILED
;
473 int SimpleEntryImpl::GetAvailableRange(int64 offset
,
476 const CompletionCallback
& callback
) {
477 DCHECK(io_thread_checker_
.CalledOnValidThread());
478 // TODO(gavinp): Determine if the simple backend should support sparse data.
480 return net::ERR_FAILED
;
483 bool SimpleEntryImpl::CouldBeSparse() const {
484 DCHECK(io_thread_checker_
.CalledOnValidThread());
485 // TODO(gavinp): Determine if the simple backend should support sparse data.
489 void SimpleEntryImpl::CancelSparseIO() {
490 DCHECK(io_thread_checker_
.CalledOnValidThread());
491 // TODO(gavinp): Determine if the simple backend should support sparse data.
495 int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback
& callback
) {
496 DCHECK(io_thread_checker_
.CalledOnValidThread());
497 // TODO(gavinp): Determine if the simple backend should support sparse data.
499 return net::ERR_FAILED
;
502 SimpleEntryImpl::~SimpleEntryImpl() {
503 DCHECK(io_thread_checker_
.CalledOnValidThread());
504 DCHECK_EQ(0U, pending_operations_
.size());
505 DCHECK(state_
== STATE_UNINITIALIZED
|| state_
== STATE_FAILURE
);
506 DCHECK(!synchronous_entry_
);
507 RemoveSelfFromBackend();
508 net_log_
.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY
);
511 void SimpleEntryImpl::MakeUninitialized() {
512 state_
= STATE_UNINITIALIZED
;
513 std::memset(crc32s_end_offset_
, 0, sizeof(crc32s_end_offset_
));
514 std::memset(crc32s_
, 0, sizeof(crc32s_
));
515 std::memset(have_written_
, 0, sizeof(have_written_
));
516 std::memset(data_size_
, 0, sizeof(data_size_
));
517 for (size_t i
= 0; i
< arraysize(crc_check_state_
); ++i
) {
518 crc_check_state_
[i
] = CRC_CHECK_NEVER_READ_AT_ALL
;
522 void SimpleEntryImpl::ReturnEntryToCaller(Entry
** out_entry
) {
525 AddRef(); // Balanced in Close()
529 void SimpleEntryImpl::RemoveSelfFromBackend() {
532 backend_
->OnDeactivated(this);
536 void SimpleEntryImpl::MarkAsDoomed() {
539 backend_
->index()->Remove(key_
);
540 RemoveSelfFromBackend();
543 void SimpleEntryImpl::RunNextOperationIfNeeded() {
544 DCHECK(io_thread_checker_
.CalledOnValidThread());
545 UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
546 pending_operations_
.size(), 0, 100, 20);
547 if (!pending_operations_
.empty() && state_
!= STATE_IO_PENDING
) {
548 scoped_ptr
<SimpleEntryOperation
> operation(
549 new SimpleEntryOperation(pending_operations_
.front()));
550 pending_operations_
.pop();
551 switch (operation
->type()) {
552 case SimpleEntryOperation::TYPE_OPEN
:
553 OpenEntryInternal(operation
->have_index(),
554 operation
->callback(),
555 operation
->out_entry());
557 case SimpleEntryOperation::TYPE_CREATE
:
558 CreateEntryInternal(operation
->have_index(),
559 operation
->callback(),
560 operation
->out_entry());
562 case SimpleEntryOperation::TYPE_CLOSE
:
565 case SimpleEntryOperation::TYPE_READ
:
566 RecordReadIsParallelizable(*operation
);
567 ReadDataInternal(operation
->index(),
571 operation
->callback());
573 case SimpleEntryOperation::TYPE_WRITE
:
574 RecordWriteDependencyType(*operation
);
575 WriteDataInternal(operation
->index(),
579 operation
->callback(),
580 operation
->truncate());
585 // The operation is kept for histograms. Makes sure it does not leak
587 executing_operation_
.swap(operation
);
588 executing_operation_
->ReleaseReferences();
589 // |this| may have been deleted.
593 void SimpleEntryImpl::OpenEntryInternal(bool have_index
,
594 const CompletionCallback
& callback
,
596 ScopedOperationRunner
operation_runner(this);
598 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN
);
600 if (state_
== STATE_READY
) {
601 ReturnEntryToCaller(out_entry
);
602 MessageLoopProxy::current()->PostTask(FROM_HERE
, base::Bind(callback
,
605 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END
,
606 CreateNetLogSimpleEntryCreationCallback(this, net::OK
));
608 } else if (state_
== STATE_FAILURE
) {
609 if (!callback
.is_null()) {
610 MessageLoopProxy::current()->PostTask(FROM_HERE
, base::Bind(
611 callback
, net::ERR_FAILED
));
614 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END
,
615 CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED
));
619 DCHECK_EQ(STATE_UNINITIALIZED
, state_
);
620 DCHECK(!synchronous_entry_
);
621 state_
= STATE_IO_PENDING
;
622 const base::TimeTicks start_time
= base::TimeTicks::Now();
623 scoped_ptr
<SimpleEntryCreationResults
> results(
624 new SimpleEntryCreationResults(
625 SimpleEntryStat(last_used_
, last_modified_
, data_size_
)));
626 Closure task
= base::Bind(&SimpleSynchronousEntry::OpenEntry
,
631 Closure reply
= base::Bind(&SimpleEntryImpl::CreationOperationComplete
,
635 base::Passed(&results
),
637 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END
);
638 worker_pool_
->PostTaskAndReply(FROM_HERE
, task
, reply
);
641 void SimpleEntryImpl::CreateEntryInternal(bool have_index
,
642 const CompletionCallback
& callback
,
644 ScopedOperationRunner
operation_runner(this);
646 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN
);
648 if (state_
!= STATE_UNINITIALIZED
) {
649 // There is already an active normal entry.
651 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END
,
652 CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED
));
654 if (!callback
.is_null()) {
655 MessageLoopProxy::current()->PostTask(FROM_HERE
, base::Bind(
656 callback
, net::ERR_FAILED
));
660 DCHECK_EQ(STATE_UNINITIALIZED
, state_
);
661 DCHECK(!synchronous_entry_
);
663 state_
= STATE_IO_PENDING
;
665 // Since we don't know the correct values for |last_used_| and
666 // |last_modified_| yet, we make this approximation.
667 last_used_
= last_modified_
= base::Time::Now();
669 // If creation succeeds, we should mark all streams to be saved on close.
670 for (int i
= 0; i
< kSimpleEntryFileCount
; ++i
)
671 have_written_
[i
] = true;
673 const base::TimeTicks start_time
= base::TimeTicks::Now();
674 scoped_ptr
<SimpleEntryCreationResults
> results(
675 new SimpleEntryCreationResults(
676 SimpleEntryStat(last_used_
, last_modified_
, data_size_
)));
677 Closure task
= base::Bind(&SimpleSynchronousEntry::CreateEntry
,
683 Closure reply
= base::Bind(&SimpleEntryImpl::CreationOperationComplete
,
687 base::Passed(&results
),
689 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END
);
690 worker_pool_
->PostTaskAndReply(FROM_HERE
, task
, reply
);
693 void SimpleEntryImpl::CloseInternal() {
694 DCHECK(io_thread_checker_
.CalledOnValidThread());
695 typedef SimpleSynchronousEntry::CRCRecord CRCRecord
;
696 scoped_ptr
<std::vector
<CRCRecord
> >
697 crc32s_to_write(new std::vector
<CRCRecord
>());
699 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN
);
701 if (state_
== STATE_READY
) {
702 DCHECK(synchronous_entry_
);
703 state_
= STATE_IO_PENDING
;
704 for (int i
= 0; i
< kSimpleEntryFileCount
; ++i
) {
705 if (have_written_
[i
]) {
706 if (GetDataSize(i
) == crc32s_end_offset_
[i
]) {
707 int32 crc
= GetDataSize(i
) == 0 ? crc32(0, Z_NULL
, 0) : crc32s_
[i
];
708 crc32s_to_write
->push_back(CRCRecord(i
, true, crc
));
710 crc32s_to_write
->push_back(CRCRecord(i
, false, 0));
715 DCHECK(STATE_UNINITIALIZED
== state_
|| STATE_FAILURE
== state_
);
718 if (synchronous_entry_
) {
720 base::Bind(&SimpleSynchronousEntry::Close
,
721 base::Unretained(synchronous_entry_
),
722 SimpleEntryStat(last_used_
, last_modified_
, data_size_
),
723 base::Passed(&crc32s_to_write
));
724 Closure reply
= base::Bind(&SimpleEntryImpl::CloseOperationComplete
, this);
725 synchronous_entry_
= NULL
;
726 worker_pool_
->PostTaskAndReply(FROM_HERE
, task
, reply
);
728 for (int i
= 0; i
< kSimpleEntryFileCount
; ++i
) {
729 if (!have_written_
[i
]) {
730 UMA_HISTOGRAM_ENUMERATION("SimpleCache.CheckCRCResult",
731 crc_check_state_
[i
], CRC_CHECK_MAX
);
735 synchronous_entry_
= NULL
;
736 CloseOperationComplete();
740 void SimpleEntryImpl::ReadDataInternal(int stream_index
,
744 const CompletionCallback
& callback
) {
745 DCHECK(io_thread_checker_
.CalledOnValidThread());
746 ScopedOperationRunner
operation_runner(this);
748 if (net_log_
.IsLoggingAllEvents()) {
750 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN
,
751 CreateNetLogReadWriteDataCallback(stream_index
, offset
, buf_len
,
755 if (state_
== STATE_FAILURE
|| state_
== STATE_UNINITIALIZED
) {
756 if (!callback
.is_null()) {
757 RecordReadResult(READ_RESULT_BAD_STATE
);
758 MessageLoopProxy::current()->PostTask(FROM_HERE
, base::Bind(
759 callback
, net::ERR_FAILED
));
761 if (net_log_
.IsLoggingAllEvents()) {
763 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END
,
764 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED
));
768 DCHECK_EQ(STATE_READY
, state_
);
769 if (offset
>= GetDataSize(stream_index
) || offset
< 0 || !buf_len
) {
770 RecordReadResult(READ_RESULT_FAST_EMPTY_RETURN
);
771 // If there is nothing to read, we bail out before setting state_ to
773 if (!callback
.is_null())
774 MessageLoopProxy::current()->PostTask(FROM_HERE
, base::Bind(
779 buf_len
= std::min(buf_len
, GetDataSize(stream_index
) - offset
);
781 state_
= STATE_IO_PENDING
;
783 backend_
->index()->UseIfExists(key_
);
785 scoped_ptr
<uint32
> read_crc32(new uint32());
786 scoped_ptr
<int> result(new int());
787 scoped_ptr
<base::Time
> last_used(new base::Time());
788 Closure task
= base::Bind(
789 &SimpleSynchronousEntry::ReadData
,
790 base::Unretained(synchronous_entry_
),
791 SimpleSynchronousEntry::EntryOperationData(stream_index
, offset
, buf_len
),
792 make_scoped_refptr(buf
),
796 Closure reply
= base::Bind(&SimpleEntryImpl::ReadOperationComplete
,
801 base::Passed(&read_crc32
),
802 base::Passed(&last_used
),
803 base::Passed(&result
));
804 worker_pool_
->PostTaskAndReply(FROM_HERE
, task
, reply
);
807 void SimpleEntryImpl::WriteDataInternal(int stream_index
,
811 const CompletionCallback
& callback
,
813 DCHECK(io_thread_checker_
.CalledOnValidThread());
814 ScopedOperationRunner
operation_runner(this);
816 if (net_log_
.IsLoggingAllEvents()) {
818 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN
,
819 CreateNetLogReadWriteDataCallback(stream_index
, offset
, buf_len
,
823 if (state_
== STATE_FAILURE
|| state_
== STATE_UNINITIALIZED
) {
824 RecordWriteResult(WRITE_RESULT_BAD_STATE
);
825 if (net_log_
.IsLoggingAllEvents()) {
827 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END
,
828 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED
));
830 if (!callback
.is_null()) {
831 // We need to posttask so that we don't go in a loop when we call the
832 // callback directly.
833 MessageLoopProxy::current()->PostTask(FROM_HERE
, base::Bind(
834 callback
, net::ERR_FAILED
));
836 // |this| may be destroyed after return here.
840 DCHECK_EQ(STATE_READY
, state_
);
841 state_
= STATE_IO_PENDING
;
843 backend_
->index()->UseIfExists(key_
);
844 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
845 // if |offset == 0| or we have already computed the CRC for [0 .. offset).
846 // We rely on most write operations being sequential, start to end to compute
847 // the crc of the data. When we write to an entry and close without having
848 // done a sequential write, we don't check the CRC on read.
849 if (offset
== 0 || crc32s_end_offset_
[stream_index
] == offset
) {
850 uint32 initial_crc
= (offset
!= 0) ? crc32s_
[stream_index
]
851 : crc32(0, Z_NULL
, 0);
853 crc32s_
[stream_index
] = crc32(initial_crc
,
854 reinterpret_cast<const Bytef
*>(buf
->data()),
857 crc32s_end_offset_
[stream_index
] = offset
+ buf_len
;
860 // |entry_stat| needs to be initialized before modifying |data_size_|.
861 scoped_ptr
<SimpleEntryStat
> entry_stat(
862 new SimpleEntryStat(last_used_
, last_modified_
, data_size_
));
864 data_size_
[stream_index
] = offset
+ buf_len
;
866 data_size_
[stream_index
] = std::max(offset
+ buf_len
,
867 GetDataSize(stream_index
));
870 // Since we don't know the correct values for |last_used_| and
871 // |last_modified_| yet, we make this approximation.
872 last_used_
= last_modified_
= base::Time::Now();
874 have_written_
[stream_index
] = true;
876 scoped_ptr
<int> result(new int());
877 Closure task
= base::Bind(&SimpleSynchronousEntry::WriteData
,
878 base::Unretained(synchronous_entry_
),
879 SimpleSynchronousEntry::EntryOperationData(
880 stream_index
, offset
, buf_len
, truncate
),
881 make_scoped_refptr(buf
),
884 Closure reply
= base::Bind(&SimpleEntryImpl::WriteOperationComplete
,
888 base::Passed(&entry_stat
),
889 base::Passed(&result
));
890 worker_pool_
->PostTaskAndReply(FROM_HERE
, task
, reply
);
893 void SimpleEntryImpl::CreationOperationComplete(
894 const CompletionCallback
& completion_callback
,
895 const base::TimeTicks
& start_time
,
896 scoped_ptr
<SimpleEntryCreationResults
> in_results
,
898 net::NetLog::EventType end_event_type
) {
899 DCHECK(io_thread_checker_
.CalledOnValidThread());
900 DCHECK_EQ(state_
, STATE_IO_PENDING
);
902 ScopedOperationRunner
operation_runner(this);
903 UMA_HISTOGRAM_BOOLEAN(
904 "SimpleCache.EntryCreationResult", in_results
->result
== net::OK
);
905 if (in_results
->result
!= net::OK
) {
906 if (in_results
->result
!= net::ERR_FILE_EXISTS
)
909 net_log_
.AddEventWithNetErrorCode(end_event_type
, net::ERR_FAILED
);
911 if (!completion_callback
.is_null()) {
912 MessageLoopProxy::current()->PostTask(FROM_HERE
, base::Bind(
913 completion_callback
, net::ERR_FAILED
));
918 // If out_entry is NULL, it means we already called ReturnEntryToCaller from
919 // the optimistic Create case.
921 ReturnEntryToCaller(out_entry
);
923 state_
= STATE_READY
;
924 synchronous_entry_
= in_results
->sync_entry
;
926 SetKey(synchronous_entry_
->key());
928 // This should only be triggered when creating an entry. The key check in
929 // the open case is handled in SimpleBackendImpl.
930 DCHECK_EQ(key_
, synchronous_entry_
->key());
932 UpdateDataFromEntryStat(in_results
->entry_stat
);
933 UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
934 (base::TimeTicks::Now() - start_time
));
935 AdjustOpenEntryCountBy(1);
937 net_log_
.AddEvent(end_event_type
);
938 if (!completion_callback
.is_null()) {
939 MessageLoopProxy::current()->PostTask(FROM_HERE
, base::Bind(
940 completion_callback
, net::OK
));
944 void SimpleEntryImpl::EntryOperationComplete(
946 const CompletionCallback
& completion_callback
,
947 const SimpleEntryStat
& entry_stat
,
948 scoped_ptr
<int> result
) {
949 DCHECK(io_thread_checker_
.CalledOnValidThread());
950 DCHECK(synchronous_entry_
);
951 DCHECK_EQ(STATE_IO_PENDING
, state_
);
953 state_
= STATE_READY
;
956 state_
= STATE_FAILURE
;
957 crc32s_end_offset_
[stream_index
] = 0;
959 UpdateDataFromEntryStat(entry_stat
);
962 if (!completion_callback
.is_null()) {
963 MessageLoopProxy::current()->PostTask(FROM_HERE
, base::Bind(
964 completion_callback
, *result
));
966 RunNextOperationIfNeeded();
969 void SimpleEntryImpl::ReadOperationComplete(
972 const CompletionCallback
& completion_callback
,
973 scoped_ptr
<uint32
> read_crc32
,
974 scoped_ptr
<base::Time
> last_used
,
975 scoped_ptr
<int> result
) {
976 DCHECK(io_thread_checker_
.CalledOnValidThread());
977 DCHECK(synchronous_entry_
);
978 DCHECK_EQ(STATE_IO_PENDING
, state_
);
983 crc_check_state_
[stream_index
] == CRC_CHECK_NEVER_READ_AT_ALL
) {
984 crc_check_state_
[stream_index
] = CRC_CHECK_NEVER_READ_TO_END
;
987 if (*result
> 0 && crc32s_end_offset_
[stream_index
] == offset
) {
988 uint32 current_crc
= offset
== 0 ? crc32(0, Z_NULL
, 0)
989 : crc32s_
[stream_index
];
990 crc32s_
[stream_index
] = crc32_combine(current_crc
, *read_crc32
, *result
);
991 crc32s_end_offset_
[stream_index
] += *result
;
992 if (!have_written_
[stream_index
] &&
993 GetDataSize(stream_index
) == crc32s_end_offset_
[stream_index
]) {
994 // We have just read a file from start to finish, and so we have
995 // computed a crc of the entire file. We can check it now. If a cache
996 // entry has a single reader, the normal pattern is to read from start
999 // Other cases are possible. In the case of two readers on the same
1000 // entry, one reader can be behind the other. In this case we compute
1001 // the crc as the most advanced reader progresses, and check it for
1002 // both readers as they read the last byte.
1004 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN
);
1006 scoped_ptr
<int> new_result(new int());
1007 Closure task
= base::Bind(&SimpleSynchronousEntry::CheckEOFRecord
,
1008 base::Unretained(synchronous_entry_
),
1010 data_size_
[stream_index
],
1011 crc32s_
[stream_index
],
1013 Closure reply
= base::Bind(&SimpleEntryImpl::ChecksumOperationComplete
,
1014 this, *result
, stream_index
,
1015 completion_callback
,
1016 base::Passed(&new_result
));
1017 worker_pool_
->PostTaskAndReply(FROM_HERE
, task
, reply
);
1018 crc_check_state_
[stream_index
] = CRC_CHECK_DONE
;
1024 RecordReadResult(READ_RESULT_SYNC_READ_FAILURE
);
1026 RecordReadResult(READ_RESULT_SUCCESS
);
1027 if (crc_check_state_
[stream_index
] == CRC_CHECK_NEVER_READ_TO_END
&&
1028 offset
+ *result
== GetDataSize(stream_index
)) {
1029 crc_check_state_
[stream_index
] = CRC_CHECK_NOT_DONE
;
1032 if (net_log_
.IsLoggingAllEvents()) {
1034 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END
,
1035 CreateNetLogReadWriteCompleteCallback(*result
));
1038 EntryOperationComplete(
1040 completion_callback
,
1041 SimpleEntryStat(*last_used
, last_modified_
, data_size_
),
1045 void SimpleEntryImpl::WriteOperationComplete(
1047 const CompletionCallback
& completion_callback
,
1048 scoped_ptr
<SimpleEntryStat
> entry_stat
,
1049 scoped_ptr
<int> result
) {
1051 RecordWriteResult(WRITE_RESULT_SUCCESS
);
1053 RecordWriteResult(WRITE_RESULT_SYNC_WRITE_FAILURE
);
1054 if (net_log_
.IsLoggingAllEvents()) {
1055 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END
,
1056 CreateNetLogReadWriteCompleteCallback(*result
));
1059 EntryOperationComplete(
1060 stream_index
, completion_callback
, *entry_stat
, result
.Pass());
1063 void SimpleEntryImpl::ChecksumOperationComplete(
1066 const CompletionCallback
& completion_callback
,
1067 scoped_ptr
<int> result
) {
1068 DCHECK(io_thread_checker_
.CalledOnValidThread());
1069 DCHECK(synchronous_entry_
);
1070 DCHECK_EQ(STATE_IO_PENDING
, state_
);
1073 if (net_log_
.IsLoggingAllEvents()) {
1074 net_log_
.AddEventWithNetErrorCode(
1075 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END
,
1079 if (*result
== net::OK
) {
1080 *result
= orig_result
;
1081 if (orig_result
>= 0)
1082 RecordReadResult(READ_RESULT_SUCCESS
);
1084 RecordReadResult(READ_RESULT_SYNC_READ_FAILURE
);
1086 RecordReadResult(READ_RESULT_SYNC_CHECKSUM_FAILURE
);
1088 if (net_log_
.IsLoggingAllEvents()) {
1089 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END
,
1090 CreateNetLogReadWriteCompleteCallback(*result
));
1093 EntryOperationComplete(
1095 completion_callback
,
1096 SimpleEntryStat(last_used_
, last_modified_
, data_size_
),
1100 void SimpleEntryImpl::CloseOperationComplete() {
1101 DCHECK(!synchronous_entry_
);
1102 DCHECK_EQ(0, open_count_
);
1103 DCHECK(STATE_IO_PENDING
== state_
|| STATE_FAILURE
== state_
||
1104 STATE_UNINITIALIZED
== state_
);
1105 net_log_
.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END
);
1106 AdjustOpenEntryCountBy(-1);
1107 MakeUninitialized();
1108 RunNextOperationIfNeeded();
1111 void SimpleEntryImpl::UpdateDataFromEntryStat(
1112 const SimpleEntryStat
& entry_stat
) {
1113 DCHECK(io_thread_checker_
.CalledOnValidThread());
1114 DCHECK(synchronous_entry_
);
1115 DCHECK_EQ(STATE_READY
, state_
);
1117 last_used_
= entry_stat
.last_used
;
1118 last_modified_
= entry_stat
.last_modified
;
1119 for (int i
= 0; i
< kSimpleEntryFileCount
; ++i
) {
1120 data_size_
[i
] = entry_stat
.data_size
[i
];
1123 backend_
->index()->UpdateEntrySize(key_
, GetDiskUsage());
1126 int64
SimpleEntryImpl::GetDiskUsage() const {
1127 int64 file_size
= 0;
1128 for (int i
= 0; i
< kSimpleEntryFileCount
; ++i
) {
1130 simple_util::GetFileSizeFromKeyAndDataSize(key_
, data_size_
[i
]);
1135 void SimpleEntryImpl::RecordReadIsParallelizable(
1136 const SimpleEntryOperation
& operation
) const {
1137 if (!executing_operation_
)
1139 // TODO(clamy): The values of this histogram should be changed to something
1141 bool parallelizable_read
=
1142 !operation
.alone_in_queue() &&
1143 executing_operation_
->type() == SimpleEntryOperation::TYPE_READ
;
1144 UMA_HISTOGRAM_BOOLEAN("SimpleCache.ReadIsParallelizable",
1145 parallelizable_read
);
1148 void SimpleEntryImpl::RecordWriteDependencyType(
1149 const SimpleEntryOperation
& operation
) const {
1150 if (!executing_operation_
)
1152 // Used in histograms, please only add entries at the end.
1153 enum WriteDependencyType
{
1154 WRITE_OPTIMISTIC
= 0,
1155 WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
= 1,
1156 WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC
= 2,
1157 WRITE_FOLLOWS_CONFLICTING_WRITE
= 3,
1158 WRITE_FOLLOWS_NON_CONFLICTING_WRITE
= 4,
1159 WRITE_FOLLOWS_CONFLICTING_READ
= 5,
1160 WRITE_FOLLOWS_NON_CONFLICTING_READ
= 6,
1161 WRITE_FOLLOWS_OTHER
= 7,
1162 WRITE_DEPENDENCY_TYPE_MAX
= 8,
1165 WriteDependencyType type
= WRITE_FOLLOWS_OTHER
;
1166 if (operation
.optimistic()) {
1167 type
= WRITE_OPTIMISTIC
;
1168 } else if (executing_operation_
->type() == SimpleEntryOperation::TYPE_READ
||
1169 executing_operation_
->type() == SimpleEntryOperation::TYPE_WRITE
) {
1170 bool conflicting
= executing_operation_
->ConflictsWith(operation
);
1172 if (executing_operation_
->type() == SimpleEntryOperation::TYPE_READ
) {
1173 type
= conflicting
? WRITE_FOLLOWS_CONFLICTING_READ
1174 : WRITE_FOLLOWS_NON_CONFLICTING_READ
;
1175 } else if (executing_operation_
->optimistic()) {
1176 type
= conflicting
? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
1177 : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC
;
1179 type
= conflicting
? WRITE_FOLLOWS_CONFLICTING_WRITE
1180 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE
;
1183 UMA_HISTOGRAM_ENUMERATION(
1184 "SimpleCache.WriteDependencyType", type
, WRITE_DEPENDENCY_TYPE_MAX
);
1187 } // namespace disk_cache