// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
#include "base/task_runner.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_net_log_parameters.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace {

// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_MAX = 5,
};

// Used in histograms, please only add entries at the end.
enum HeaderSizeChange {
  HEADER_SIZE_CHANGE_INITIAL,
  HEADER_SIZE_CHANGE_SAME,
  HEADER_SIZE_CHANGE_INCREASE,
  HEADER_SIZE_CHANGE_DECREASE,
  HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
  HEADER_SIZE_CHANGE_MAX
};

void RecordReadResult(ReadResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.ReadResult", result, READ_RESULT_MAX);
}

void RecordWriteResult(WriteResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.WriteResult",
                            result, WRITE_RESULT_MAX);
}

// TODO(ttuttle): Consider removing this once we have a good handle on header
// size changes.
void RecordHeaderSizeChange(int old_size, int new_size) {
  HeaderSizeChange size_change;

  UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSize", new_size);

  if (old_size == 0) {
    size_change = HEADER_SIZE_CHANGE_INITIAL;
  } else if (new_size == old_size) {
    size_change = HEADER_SIZE_CHANGE_SAME;
  } else if (new_size > old_size) {
    int delta = new_size - old_size;
    UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSizeIncreaseAbsolute",
                               delta);
    UMA_HISTOGRAM_PERCENTAGE("SimpleCache.HeaderSizeIncreasePercentage",
                             delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_INCREASE;
  } else {  // new_size < old_size
    int delta = old_size - new_size;
    UMA_HISTOGRAM_COUNTS_10000("SimpleCache.HeaderSizeDecreaseAbsolute",
                               delta);
    UMA_HISTOGRAM_PERCENTAGE("SimpleCache.HeaderSizeDecreasePercentage",
                             delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_DECREASE;
  }

  UMA_HISTOGRAM_ENUMERATION("SimpleCache.HeaderSizeChange",
                            size_change,
                            HEADER_SIZE_CHANGE_MAX);
}

void RecordUnexpectedStream0Write() {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.HeaderSizeChange",
                            HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
                            HEADER_SIZE_CHANGE_MAX);
}

// Short trampoline to take an owned input parameter and call a net completion
// callback with its value.
void CallCompletionCallback(const net::CompletionCallback& callback,
                            scoped_ptr<int> result) {
  DCHECK(result);
  if (!callback.is_null())
    callback.Run(*result);
}

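// Number of simple cache entries currently open, process-wide. Tracked only
// so that SimpleCache.GlobalOpenEntryCount can be recorded on each open and
// close.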
int g_open_entry_count = 0;

void AdjustOpenEntryCountBy(int offset) {
  g_open_entry_count += offset;
  UMA_HISTOGRAM_COUNTS_10000("SimpleCache.GlobalOpenEntryCount",
                             g_open_entry_count);
}

}  // namespace

namespace disk_cache {

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};

SimpleEntryImpl::SimpleEntryImpl(const FilePath& path,
                                 const uint64 entry_hash,
                                 OperationsMode operations_mode,
                                 SimpleBackendImpl* backend,
                                 net::NetLog* net_log)
    : backend_(backend->AsWeakPtr()),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      open_count_(0),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL),
      net_log_(net::BoundNetLog::Make(
          net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)) {
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
                 arrays_should_be_same_size);
  MakeUninitialized();
  net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
                      CreateNetLogSimpleEntryConstructionCallback(this));
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);

  bool have_index = backend_->index()->initialized();
  // This enumeration is used in histograms, add entries only at end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (have_index) {
    if (backend_->index()->Has(entry_hash_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
                            open_entry_index_enum, INDEX_MAX);

  // If entry is not known to the index, initiate fast failover to the network.
  if (open_entry_index_enum == INDEX_MISS) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        net::ERR_FAILED);
    return net::ERR_FAILED;
  }

  pending_operations_.push(SimpleEntryOperation::OpenOperation(
      this, have_index, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);

  bool have_index = backend_->index()->initialized();
  int ret_value = net::ERR_FAILED;
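  // In optimistic mode a brand-new entry is handed back to the caller
  // synchronously (net::OK) and the actual file creation is queued to run
  // later; the queued operation carries no callback and no out pointer,
  // since the caller has already been answered.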
  if (use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);

    ReturnEntryToCaller(out_entry);
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, callback, out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry. That way the worst case is an entry in the index
  // without its backing files, which never leaks files.
  // CreationOperationComplete will remove the entry from the index if the
  // creation fails.
  backend_->index()->Insert(key_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);

  MarkAsDoomed();
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
                            entry_hash_, result.get());
  Closure reply = base::Bind(&CallCompletionCallback,
                             callback, base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::SetKey(const std::string& key) {
  key_ = key;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
                    net::NetLog::StringCallback("key", &key));
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount ||
      buf_len < 0) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordReadResult(READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
      offset < 0 || !buf_len)) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(0));
    }
    RecordReadResult(READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  bool alone_in_queue =
      pending_operations_.size() == 0 && state_ == STATE_READY;
  pending_operations_.push(SimpleEntryOperation::ReadOperation(
      this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount ||
      offset < 0 || buf_len < 0) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordWriteResult(WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    RecordWriteResult(WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }

  ScopedOperationRunner operation_runner(this);

  // Currently, Simple Cache is only used for HTTP, which stores the headers in
  // stream 0 and always writes them with a single, truncating write. Detect
  // these writes and record the size and size changes of the headers. Also,
  // note writes to stream 0 that violate those assumptions.
  if (stream_index == 0) {
    if (offset == 0 && truncate)
      RecordHeaderSizeChange(data_size_[0], buf_len);
    else
      RecordUnexpectedStream0Write();
  }

  // We can only do an optimistic Write if there are no pending operations, so
  // that we are sure the next call to RunNextOperationIfNeeded will actually
  // run the write operation that sets the stream size. It also protects us
  // from previous possibly-conflicting writes that could be stacked in
  // |pending_operations_|. We could optimize this for when we have only read
  // operations enqueued.
  const bool optimistic =
      (use_optimistic_operations_ && state_ == STATE_READY &&
       pending_operations_.size() == 0);
  CompletionCallback op_callback;
  scoped_refptr<net::IOBuffer> op_buf;
  int ret_value = net::ERR_FAILED;
  if (!optimistic) {
    op_buf = buf;
    op_callback = callback;
    ret_value = net::ERR_IO_PENDING;
  } else {
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
    if (buf) {
      op_buf = new IOBuffer(buf_len);
      memcpy(op_buf->data(), buf->data(), buf_len);
    }
    op_callback = CompletionCallback();
    ret_value = buf_len;
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
          CreateNetLogReadWriteCompleteCallback(buf_len));
    }
  }

  pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
                                                                stream_index,
                                                                offset,
                                                                buf_len,
                                                                op_buf.get(),
                                                                truncate,
                                                                optimistic,
                                                                op_callback));
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  return false;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
  net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
}

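// Resets all per-stream bookkeeping (CRCs, CRC end offsets, written flags and
// data sizes) and returns the entry to STATE_UNINITIALIZED.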
void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
    crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
  }
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close().
  *out_entry = this;
}

void SimpleEntryImpl::RemoveSelfFromBackend() {
  if (!backend_.get())
    return;
  backend_->OnDeactivated(this);
  backend_.reset();
}

void SimpleEntryImpl::MarkAsDoomed() {
  if (!backend_.get())
    return;
  backend_->index()->Remove(key_);
  RemoveSelfFromBackend();
}

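// Pops and dispatches the next queued operation, unless the queue is empty or
// an operation is already in flight (STATE_IO_PENDING). Completion handlers
// call back into this method, so queued operations run one at a time.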
void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
                              pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    scoped_ptr<SimpleEntryOperation> operation(
        new SimpleEntryOperation(pending_operations_.front()));
    pending_operations_.pop();
    switch (operation->type()) {
      case SimpleEntryOperation::TYPE_OPEN:
        OpenEntryInternal(operation->have_index(),
                          operation->callback(),
                          operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CREATE:
        CreateEntryInternal(operation->have_index(),
                            operation->callback(),
                            operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CLOSE:
        CloseInternal();
        break;
      case SimpleEntryOperation::TYPE_READ:
        RecordReadIsParallelizable(*operation);
        ReadDataInternal(operation->index(),
                         operation->offset(),
                         operation->buf(),
                         operation->length(),
                         operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
        RecordWriteDependencyType(*operation);
        WriteDataInternal(operation->index(),
                          operation->offset(),
                          operation->buf(),
                          operation->length(),
                          operation->callback(),
                          operation->truncate());
        break;
      default:
        NOTREACHED();
    }
    // The operation is kept for histograms. Make sure it does not leak
    // resources.
    executing_operation_.swap(operation);
    executing_operation_->ReleaseReferences();
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::OpenEntryInternal(bool have_index,
                                        const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);

  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
                                                                net::OK));
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::OK));
    return;
  } else if (state_ == STATE_FAILURE) {
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
                            path_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreateEntryInternal(bool have_index,
                                          const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);

  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));

    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
                            path_,
                            key_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);

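  // For each stream written during this entry's lifetime, persist a CRC
  // record. The CRC is only marked trustworthy when the running checksum
  // covers the whole stream, i.e. |crc32s_end_offset_| matches the stream
  // size; otherwise a "has no CRC" record is written.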
  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task =
        base::Bind(&SimpleSynchronousEntry::Close,
                   base::Unretained(synchronous_entry_),
                   SimpleEntryStat(last_used_, last_modified_, data_size_),
                   base::Passed(&crc32s_to_write));
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (!have_written_[i]) {
        UMA_HISTOGRAM_ENUMERATION("SimpleCache.CheckCRCResult",
                                  crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    synchronous_entry_ = NULL;
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(READ_RESULT_BAD_STATE);
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, 0));
    return;
  }

  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  state_ = STATE_IO_PENDING;
  if (backend_.get())
    backend_->index()->UseIfExists(key_);

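  // The actual disk read runs on the worker pool. ReadOperationComplete is
  // posted back to the IO thread with the number of bytes read, the CRC of
  // those bytes, and the updated last-used time.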
  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  scoped_ptr<base::Time> last_used(new base::Time());
  Closure task = base::Bind(
      &SimpleSynchronousEntry::ReadData,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
      make_scoped_refptr(buf),
      read_crc32.get(),
      last_used.get(),
      result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
                             this,
                             stream_index,
                             offset,
                             callback,
                             base::Passed(&read_crc32),
                             base::Passed(&last_used),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(WRITE_RESULT_BAD_STATE);
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    if (!callback.is_null()) {
      // We need to post a task here so that we don't loop by invoking the
      // callback directly.
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;
  if (backend_.get())
    backend_->index()->UseIfExists(key_);
  // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the CRC of the data. When we write to an entry and close without
  // having done a sequential write, we don't check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc = (offset != 0) ? crc32s_[stream_index]
                                       : crc32(0, Z_NULL, 0);
    if (buf_len > 0) {
      crc32s_[stream_index] = crc32(initial_crc,
                                    reinterpret_cast<const Bytef*>(buf->data()),
                                    buf_len);
    }
    crc32s_end_offset_[stream_index] = offset + buf_len;
  }

  // |entry_stat| needs to be initialized before modifying |data_size_|.
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_));
  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                stream_index, offset, buf_len, truncate),
                            make_scoped_refptr(buf),
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
                             this,
                             stream_index,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

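// Completion handler for SimpleSynchronousEntry::OpenEntry and CreateEntry.
// Runs on the IO thread; on failure it dooms the entry and resets it to the
// uninitialized state, on success it adopts the synchronous entry and
// transitions to STATE_READY.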
void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleEntryCreationResults> in_results,
    Entry** out_entry,
    net::NetLog::EventType end_event_type) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_results);
  ScopedOperationRunner operation_runner(this);
  UMA_HISTOGRAM_BOOLEAN(
      "SimpleCache.EntryCreationResult", in_results->result == net::OK);
  if (in_results->result != net::OK) {
    if (in_results->result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();

    net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);

    if (!completion_callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          completion_callback, net::ERR_FAILED));
    }
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = in_results->sync_entry;
  if (key_.empty()) {
    SetKey(synchronous_entry_->key());
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  UpdateDataFromEntryStat(in_results->entry_stat);
  UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
                      (base::TimeTicks::Now() - start_time));
  AdjustOpenEntryCountBy(1);

  net_log_.AddEvent(end_event_type);
  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, net::OK));
  }
}

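// Shared completion path for read and write operations. A negative result
// dooms the entry and moves it to STATE_FAILURE (also invalidating the
// running CRC for the stream); on success the entry's cached metadata is
// refreshed from |entry_stat|.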
void SimpleEntryImpl::EntryOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    const SimpleEntryStat& entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  state_ = STATE_READY;
  if (*result < 0) {
    MarkAsDoomed();
    state_ = STATE_FAILURE;
    crc32s_end_offset_[stream_index] = 0;
  } else {
    UpdateDataFromEntryStat(entry_stat);
  }

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<base::Time> last_used,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 &&
      crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
    crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
  }

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a crc of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.
      //
      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the crc as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.

      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index,
                                data_size_[stream_index],
                                crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }

  if (*result < 0) {
    RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(
      stream_index,
      completion_callback,
      SimpleEntryStat(*last_used, last_modified_, data_size_),
      result.Pass());
}

void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  if (*result >= 0)
    RecordWriteResult(WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(WRITE_RESULT_SYNC_WRITE_FAILURE);
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(
      stream_index, completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
        *result);
  }

  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(READ_RESULT_SUCCESS);
    else
      RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(
      stream_index,
      completion_callback,
      SimpleEntryStat(last_used_, last_modified_, data_size_),
      result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
  AdjustOpenEntryCountBy(-1);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

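// Copies the authoritative timestamps and stream sizes reported by the
// synchronous entry into this object, then tells the index about the entry's
// new on-disk footprint.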
void SimpleEntryImpl::UpdateDataFromEntryStat(
    const SimpleEntryStat& entry_stat) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);

  last_used_ = entry_stat.last_used;
  last_modified_ = entry_stat.last_modified;
  for (int i = 0; i < kSimpleEntryFileCount; ++i) {
    data_size_[i] = entry_stat.data_size[i];
  }
  if (backend_.get())
    backend_->index()->UpdateEntrySize(key_, GetDiskUsage());
}

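// Estimates this entry's total disk usage by summing, per stream, the file
// size implied by the key length and the stream's data size.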
int64 SimpleEntryImpl::GetDiskUsage() const {
  int64 file_size = 0;
  for (int i = 0; i < kSimpleEntryFileCount; ++i) {
    file_size +=
        simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
  }
  return file_size;
}

void SimpleEntryImpl::RecordReadIsParallelizable(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // TODO(clamy): The values of this histogram should be changed to something
  // more useful.
  bool parallelizable_read =
      !operation.alone_in_queue() &&
      executing_operation_->type() == SimpleEntryOperation::TYPE_READ;
  UMA_HISTOGRAM_BOOLEAN("SimpleCache.ReadIsParallelizable",
                        parallelizable_read);
}

void SimpleEntryImpl::RecordWriteDependencyType(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum WriteDependencyType {
    WRITE_OPTIMISTIC = 0,
    WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
    WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
    WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
    WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
    WRITE_FOLLOWS_CONFLICTING_READ = 5,
    WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
    WRITE_FOLLOWS_OTHER = 7,
    WRITE_DEPENDENCY_TYPE_MAX = 8,
  };

  WriteDependencyType type = WRITE_FOLLOWS_OTHER;
  if (operation.optimistic()) {
    type = WRITE_OPTIMISTIC;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
             executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    bool conflicting = executing_operation_->ConflictsWith(operation);

    if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
                         : WRITE_FOLLOWS_NON_CONFLICTING_READ;
    } else if (executing_operation_->optimistic()) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
                         : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
    } else {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
                         : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
    }
  }
  UMA_HISTOGRAM_ENUMERATION(
      "SimpleCache.WriteDependencyType", type, WRITE_DEPENDENCY_TYPE_MAX);
}

}  // namespace disk_cache