// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/entry_impl.h"

#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/bitmap.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/histogram_macros.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/sparse_control.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;
const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| from the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;

  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

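// Example of how the offset handling plays out: writing 100 bytes at file
// offset 20000 to an empty buffer sets offset_ to 20000 (the write starts
// past the first 16KB) and stores the bytes at buffer_[0]; a later write at
// file offset 20100 lands at buffer_[100]. Writes anywhere inside the first
// 16KB keep offset_ at 0, so the buffer mirrors the start of the stream.
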
bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

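// Growth limits in PreWrite() above: the first write to an empty buffer that
// starts past kMaxBlockSize is capped at kMaxBufferSize (1 MB); any later
// growth is capped at kMaxBufferSize * 6 / 5 (1.2 MB), after which the caller
// is expected to flush the buffer to disk (see PrepareBuffer() below).
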
void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  // Convert the file offset into a buffer-relative offset before resizing.
  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

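// Example: with offset_ == 1024 and eof == 4096, a request for 2048 bytes at
// offset 0 is trimmed to 1024 bytes and PreRead() returns false, so the
// caller performs that smaller read from disk and then services the rest of
// the request from this buffer.
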
int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;

    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

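// Note that when offset < offset_ the leading |clean_bytes| are zero-filled
// rather than read from anywhere: that range was never written and has no
// file backing it yet, so it reads back as zeros.
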
void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_)
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

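// Worked example: growing a 16KB buffer because 20KB is required. to_add is
// max(20KB - 16KB, 64KB) == 64KB, then max(16KB, 64KB) == 64KB, so we ask the
// backend for min(16KB + 64KB, limit) == 80KB. Growth is always at least four
// blocks (64KB) and at least doubles the current capacity, unless capped by
// |limit|.
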
// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
      backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
      dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_)
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_)
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

void EntryImpl::Close() {
  if (background_queue_)
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

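// Keys longer than kMaxInternalKeyLength are stored outside the entry block,
// at the address kept in |long_key|, followed by a trailing '\0' (see
// CreateEntry() above); short keys live inline in the entry's |key| field.
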
Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                         const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                               const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 const CompletionCallback& callback) {
  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_)
    background_queue_->CancelSparseIO(this);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

// ------------------------------------------------------------------------

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  if (!backend_) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
  } else {
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        if (!(ret = Flush(index, 0)))
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

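// A note on the failure path above: when Flush() fails, the destructor leaves
// node_.Data()->dirty set to a value that is never zero and never equal to
// the backend's current entry id, so the entry still looks dirty the next
// time it is read back from disk.
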
int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

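// The return convention above mirrors the rest of net/: a synchronous hit
// (a buffered read, or a file read that completed in-line) returns the number
// of bytes transferred, while a pending asynchronous read returns
// net::ERR_IO_PENDING and reports the byte count later through |callback|,
// via the SyncCallback helper.
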
int EntryImpl::InternalWriteData(int index, int offset,
                                 IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // Note that offset + buf_len could overflow and become negative.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

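// Note the fast path above: writes that land in the user buffer complete
// synchronously right after UserBuffer::Write() and never touch the file;
// only data bound for a backing file takes the asynchronous File::Write()
// path with its SyncCallback bookkeeping.
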
// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_)
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_);
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index])
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_)
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
  }
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}

// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

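// The retry above is best-effort: Flush() empties the buffer, so the second
// PreWrite() starts from a clean state; if even that fails (for example, the
// write begins past the flushed end), the buffer is dropped and the operation
// goes straight to disk.
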
bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

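// Flush() is also where block-file storage is allocated lazily: the data
// block is only created (via CreateDataBlock) the first time buffered data
// has to be persisted, sized to what is known at that point.
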
void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_);
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);

    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_)
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = 0;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache