// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/entry_impl.h"

#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/bitmap.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/sparse_control.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/net_log_parameters.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to buffer the callback from a file IO
// operation from the actual net class.
class SyncCallback : public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion. Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
               const net::CompletionCallback& callback,
               net::NetLog::EventType end_event_type)
      : entry_(entry), callback_(callback), buf_(buffer),
        start_(TimeTicks::Now()), end_event_type_(end_event_type) {
    entry->AddRef();
    entry->IncrementIoCount();
  }
  ~SyncCallback() override {}

  void OnFileIOComplete(int bytes_copied) override;

  // Deletes this object without running the user callback.
  void Discard();

 private:
  disk_cache::EntryImpl* entry_;
  net::CompletionCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;
  TimeTicks start_;
  const net::NetLog::EventType end_event_type_;

  DISALLOW_COPY_AND_ASSIGN(SyncCallback);
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsCapturing()) {
      entry_->net_log().EndEvent(
          end_event_type_,
          disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied));
    }
    entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
    buf_ = NULL;  // Release the buffer before invoking the callback.
    callback_.Run(bytes_copied);
  }
  entry_->Release();
  delete this;
}

void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = NULL;
  OnFileIOComplete(0);
}

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| at the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;

  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

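// For example (illustration only): writing 100 bytes at offset 20000 to an
// empty UserBuffer sets offset_ to 20000 (20000 > kMaxBlockSize), so the
// buffer holds stream bytes [20000, 20100) and Start()/End() report exactly
// that range. A write at offset 1000 instead keeps offset_ at 0 and grows the
// buffer to cover [0, 1100), because the first 16KB always stay at offset 0.
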
bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;  // Make the offset relative to the buffer start.
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

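// For example (illustration only): with a buffer covering stream bytes
// [100, 200) of a 300-byte stream (eof == 300), PreRead(300, 0, &len) trims
// |len| to at most 100 and returns false, so the caller reads that first
// chunk from disk; PreRead(300, 150, &len) returns true and Read() can serve
// the bytes straight from memory.
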
int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;

    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

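// Worked example of the growth policy above (illustration only): with a 16KB
// buffer (kMaxBlockSize) that needs 20KB, to_add starts as
// max(20KB - 16KB, 64KB) = 64KB, survives the doubling clamp
// (max(16KB, 64KB)), and the buffer grows to min(16KB + 64KB, limit) = 80KB,
// so a run of small writes doesn't trigger one reallocation each.
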
// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
      backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
      dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback) {
  if (net_log_.IsCapturing()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  if (net_log_.IsCapturing()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsCapturing()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

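// To summarize the key placement above (illustration only): keys up to
// kMaxInternalKeyLength bytes live inline in EntryStore::key, so reading them
// back never touches another block; longer keys are written, with a trailing
// '\0', to a block allocated through CreateBlock() (or to a separate file for
// very large keys), and EntryStore::long_key records that address.
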
bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  DCHECK_NE(address.value(), entry_.address().value());
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

void EntryImpl::SetDirtyFlag(int32 current_id) {
  DCHECK(node_.HasData());
  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntryV2()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheckV2())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheckV2())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheckV2()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  if (backend_.get())
    backend_->DecrementIoCount();
}

void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_.get())
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

// static
int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

  return ((key_size - key1_len) / 256 + 2);
}

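// Worked example (illustration only): each block is 256 bytes and the first
// block holds key1_len bytes of key after the fixed EntryStore fields. If
// key1_len is 160, a 300-byte key needs (300 - 160) / 256 + 2 = 2 blocks and
// a 500-byte key needs (500 - 160) / 256 + 2 = 3; anything longer than
// kMaxInternalKeyLength uses a single block plus an external key (long_key).
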
// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_.get())
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  if (background_queue_.get())
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(base::WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

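// A hypothetical caller, for illustration only: a null callback makes the
// call run synchronously on the current thread, while a real callback routes
// the operation through the background queue and yields ERR_IO_PENDING until
// completion.
//
//   scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(1024));
//   int rv = entry->ReadData(0, 0, buffer.get(), 1024,
//                            net::CompletionCallback());  // Synchronous.
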
int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                         const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                               const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 const CompletionCallback& callback) {
  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

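// A hypothetical caller, for illustration only: the operation completes on
// the background queue, the callback receives the number of contiguous bytes
// found, and |start| is filled in with where that range begins.
//
//   int64 start = 0;
//   int rv = entry->GetAvailableRange(0, 1024, &start, callback);
//   // rv == net::ERR_IO_PENDING; the callback reports the available bytes.
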
bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_.get())
    background_queue_->CancelSparseIO(this);
}

int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        ret = Flush(index, 0);
        if (!ret)
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback,
                  &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index, int offset,
                                 IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // offset + buf_len could overflow to a negative number.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
    if (!buf_len)
      return 0;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

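// Note on the completion contract shared by the read and write paths above:
// when the data is served from a UserBuffer the byte count is returned
// directly; otherwise a SyncCallback is allocated for asynchronous file IO
// and the caller sees net::ERR_IO_PENDING, with the real result delivered
// through OnFileIOComplete(). A null |callback| always means the operation
// finishes before returning.
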
// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  if (!backend_.get())
    return NULL;

  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    scoped_refptr<File> file(new File(kKeyFileIndex == index));
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// small buffers.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}

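// To illustrate the decisions above (illustration only): a sequential write
// starting at offset 0 simply lands in a fresh UserBuffer; a write over data
// already sitting in a block file first moves that data into the buffer so
// there is a single source of truth; and a write into the first 16KB of an
// existing external file copies that prefix into the buffer before accepting
// the new bytes.
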
// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

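// A sketch of what Flush() above does for a buffered stream (illustration
// only): the final block or external file is allocated lazily here, sized by
// max(stored size, |min_len|); for block files the whole buffer is written at
// the block's position (the buffer always starts at stream offset 0 in that
// case), while for external files only the buffered range [Start(), End())
// is written at its own offset.
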
void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = -1;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache