// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/entry_impl_v3.h"

#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl_v3.h"
#include "net/disk_cache/blockfile/bitmap.h"
#include "net/disk_cache/blockfile/disk_format_v3.h"
#include "net/disk_cache/blockfile/histogram_macros_v3.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/net_log_parameters.h"
// #include "net/disk_cache/blockfile/sparse_control_v3.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

typedef StorageBlock<EntryRecord> CacheEntryBlockV3;
typedef StorageBlock<ShortEntryRecord> CacheShortEntryBlock;

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImplV3::UserBuffer {
 public:
  explicit UserBuffer(BackendImplV3* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| at the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImplV3> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

bool EntryImplV3::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImplV3::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImplV3::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImplV3::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

int EntryImplV3::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;

    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImplV3::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImplV3::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_)
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

// ------------------------------------------------------------------------

EntryImplV3::EntryImplV3(BackendImplV3* backend, Addr address, bool read_only)
    : backend_(backend->GetWeakPtr()),
      address_(address),
      doomed_(false),
      read_only_(read_only),
      dirty_(true),
      modified_(false) {
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

#if defined(V3_NOT_JUST_YET_READY)

bool EntryImplV3::CreateEntry(Addr node_address, const std::string& key,
                              uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

uint32 EntryImplV3::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImplV3::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImplV3::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImplV3::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImplV3::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImplV3::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImplV3::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImplV3::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImplV3::net_log() const {
  return net_log_;
}

// ------------------------------------------------------------------------

void EntryImplV3::Doom() {
  if (background_queue_)
    background_queue_->DoomEntryImpl(this);
}

void EntryImplV3::DoomImpl() {
  if (doomed_ || !backend_)
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

void EntryImplV3::Close() {
  if (background_queue_)
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImplV3::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
  File* key_file = const_cast<EntryImplV3*>(this)->GetBackingFile(address,
                                                                  kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(base::WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

Time EntryImplV3::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImplV3::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImplV3::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImplV3::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                          const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::ReadDataImpl(int index, int offset, IOBuffer* buf,
                              int buf_len, const CompletionCallback& callback) {
  if (net_log_.IsCapturing()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImplV3::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                           const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::WriteDataImpl(int index, int offset, IOBuffer* buf,
                               int buf_len, const CompletionCallback& callback,
                               bool truncate) {
  if (net_log_.IsCapturing()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImplV3::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImplV3::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImplV3::GetAvailableRange(int64 offset, int len, int64* start,
                                   const CompletionCallback& callback) {
  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

bool EntryImplV3::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImplV3*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImplV3::CancelSparseIO() {
  if (background_queue_)
    background_queue_->CancelSparseIO(this);
}

void EntryImplV3::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImplV3::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

// ------------------------------------------------------------------------

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImplV3::~EntryImplV3() {
  if (!backend_) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        if (!(ret = Flush(index, 0)))
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

int EntryImplV3::InternalReadData(int index, int offset,
                                  IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImplV3::InternalWriteData(int index, int offset,
                                   IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback,
                                   bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // offset or buf_len could be negative numbers.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImplV3::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImplV3::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_)
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImplV3::DeleteData(Addr address, int index) {
  DCHECK(backend_);
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImplV3::UpdateRank(bool modified) {
  if (!backend_)
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

void EntryImplV3::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
  }
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that end up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImplV3::PrepareTarget(int index, int offset, int buf_len,
                                bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}

// We get to this function with some data already stored. If there is a
// truncation that results on data stored internally, we'll explicitly
// handle the case here.
bool EntryImplV3::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImplV3::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImplV3::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImplV3::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImplV3::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImplV3::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImplV3::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImplV3::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImplV3::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImplV3::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImplV3::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_);
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

#endif  // defined(V3_NOT_JUST_YET_READY)

void EntryImplV3::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_)
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImplV3::Log(const char* msg) {
  Trace("%s 0x%p 0x%x", msg, reinterpret_cast<void*>(this), address_);
  Trace("  data: 0x%x 0x%x", entry_->data_addr[0], entry_->data_addr[1]);
  Trace("  doomed: %d", doomed_);
}

1398 void EntryImplV3::Doom() {
1402 void EntryImplV3::Close() {
1406 std::string
EntryImplV3::GetKey() const {
1407 return std::string();
1410 Time
EntryImplV3::GetLastUsed() const {
1414 Time
EntryImplV3::GetLastModified() const {
1418 int32
EntryImplV3::GetDataSize(int index
) const {
1422 int EntryImplV3::ReadData(int index
, int offset
, IOBuffer
* buf
, int buf_len
,
1423 const CompletionCallback
& callback
) {
1424 return net::ERR_FAILED
;
1427 int EntryImplV3::WriteData(int index
, int offset
, IOBuffer
* buf
, int buf_len
,
1428 const CompletionCallback
& callback
, bool truncate
) {
1429 return net::ERR_FAILED
;
1432 int EntryImplV3::ReadSparseData(int64 offset
, IOBuffer
* buf
, int buf_len
,
1433 const CompletionCallback
& callback
) {
1434 return net::ERR_FAILED
;
1437 int EntryImplV3::WriteSparseData(int64 offset
, IOBuffer
* buf
, int buf_len
,
1438 const CompletionCallback
& callback
) {
1439 return net::ERR_FAILED
;
1442 int EntryImplV3::GetAvailableRange(int64 offset
, int len
, int64
* start
,
1443 const CompletionCallback
& callback
) {
1444 return net::ERR_FAILED
;
1447 bool EntryImplV3::CouldBeSparse() const {
1451 void EntryImplV3::CancelSparseIO() {
1455 int EntryImplV3::ReadyForSparseIO(const CompletionCallback
& callback
) {
1456 return net::ERR_FAILED
;
1459 EntryImplV3::~EntryImplV3() {
1463 } // namespace disk_cache