// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/entry_impl.h"

#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/bitmap.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/sparse_control.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/net_log_parameters.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to buffer the callback from a file IO
// operation from the actual net class.
class SyncCallback : public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion. Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
               const net::CompletionCallback& callback,
               net::NetLog::EventType end_event_type)
      : entry_(entry), callback_(callback), buf_(buffer),
        start_(TimeTicks::Now()), end_event_type_(end_event_type) {
    entry->AddRef();
    entry->IncrementIoCount();
  }
  ~SyncCallback() override {}

  void OnFileIOComplete(int bytes_copied) override;
  void Discard();

 private:
  disk_cache::EntryImpl* entry_;
  net::CompletionCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;
  TimeTicks start_;
  const net::NetLog::EventType end_event_type_;

  DISALLOW_COPY_AND_ASSIGN(SyncCallback);
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsLogging()) {
      entry_->net_log().EndEvent(
          end_event_type_,
          disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied));
    }
    entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
    buf_ = NULL;  // Release the buffer before invoking the callback.
    callback_.Run(bytes_copied);
  }
  entry_->Release();
  delete this;
}

void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = NULL;
  OnFileIOComplete(0);
}

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| starting at the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;

  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

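// An illustrative sketch (not from the original sources) of how UserBuffer
// tracks offsets, assuming kMaxBlockSize is the 16 KB mentioned above:
//
//   UserBuffer buffer(backend);
//   buffer.Write(0, buf, 100);        // offset_ stays 0; Size() == 100.
//   buffer.Write(100, buf, 50);       // Sequential append; End() == 150.
//
//   UserBuffer detached(backend);
//   detached.Write(20000, buf, 100);  // First write lands past 16 KB, so
//                                     // offset_ becomes 20000 and only the
//                                     // 100 new bytes are buffered.
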
bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

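// A worked example of the limits above (illustrative only): with
// kMaxBufferSize == 1 MB, a 200 KB write at offset 900 KB into a buffer that
// starts at offset 0 needs required == 1100 KB. That is over kMaxBufferSize
// but under kMaxBufferSize * 6 / 5 (1.2 MB), so GrowBuffer() may still accept
// it; the 20% headroom applies unless the buffer is empty and the write
// starts past the first 16 KB.
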
void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

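// Example of the contract above (illustrative only): with offset_ == 100,
// eof == 1000 and a request of 500 bytes at offset 50, PreRead() returns
// false but clamps *len to 50, so the caller performs a 50-byte disk read
// and can then come back to the buffer for the rest.
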
int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;

    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
      backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
      dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

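// In short, the key placement logic above is (illustrative summary):
//
//   key.size() <= kMaxInternalKeyLength  ->  inline, in entry_store->key.
//   key.size() >  kMaxInternalKeyLength  ->  in a block or separate file,
//                                            addressed by entry_store->long_key
//                                            and stored with a trailing '\0'.
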
bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  if (entry_.Data()->long_key) {
    Addr address(entry_.Data()->long_key);
    DeleteData(address, kKeyFileIndex);
    backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
  }

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

EntryImpl::GetNextAddress() {
518 return entry_
.Data()->next
;
521 void EntryImpl::SetNextAddress(Addr address
) {
522 DCHECK_NE(address
.value(), entry_
.address().value());
523 entry_
.Data()->next
= address
.value();
524 bool success
= entry_
.Store();
bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

void EntryImpl::SetDirtyFlag(int32 current_id) {
  DCHECK(node_.HasData());
  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntryV2()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheckV2())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheckV2())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheckV2()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  if (backend_.get())
    backend_->DecrementIoCount();
}

void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_.get())
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

  return ((key_size - key1_len) / 256 + 2);
}

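// Worked example (illustrative; the real key1_len depends on EntryStore's
// layout): if key1_len were 160, a 500-byte key would need
// (500 - 160) / 256 + 2 == 3 blocks, i.e. the EntryStore block plus two more
// 256-byte blocks of key data.
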
// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_.get())
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  if (background_queue_.get())
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  static_assert(kNumStreams == kKeyFileIndex, "invalid key index");
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

EntryImpl::GetLastUsed() const {
795 CacheRankingsBlock
* node
= const_cast<CacheRankingsBlock
*>(&node_
);
796 return Time::FromInternalValue(node
->Data()->last_used
);
799 Time
EntryImpl::GetLastModified() const {
800 CacheRankingsBlock
* node
= const_cast<CacheRankingsBlock
*>(&node_
);
801 return Time::FromInternalValue(node
->Data()->last_modified
);
int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                         const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                               const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 const CompletionCallback& callback) {
  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_.get())
    background_queue_->CancelSparseIO(this);
}

int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        ret = Flush(index, 0);
        if (!ret)
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

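// A note on the return value above (illustrative summary): reads served from
// user_buffers_, and reads that File::Read() completes synchronously, return
// the byte count directly; only a read still in flight returns
// net::ERR_IO_PENDING, and then SyncCallback::OnFileIOComplete() delivers the
// final count to |callback|.
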
int EntryImpl::InternalWriteData(int index, int offset,
                                 IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // offset + buf_len could overflow to a negative number.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
    if (!buf_len)
      return 0;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

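// The |extending| / |truncate| interplay above, summarized (illustrative
// only): a write ending past the current size grows the stream, a truncating
// write ending before it shrinks the stream, and both paths go through
// UpdateSize() so data_size[index] and unreported_size_[index] stay in sync.
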
// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  if (!backend_.get())
    return NULL;

  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    scoped_refptr<File> file(new File(kKeyFileIndex == index));
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}

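// A sketch of the dispatch above (illustrative only):
//
//   truncate requested             -> HandleTruncation().
//   data lives in a block file     -> MoveToLocalBuffer(), then buffer it.
//   no buffer and offset < 16 KB   -> CopyToLocalBuffer() to preserve data.
//   otherwise                      -> PrepareBuffer() on a (new) UserBuffer.
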
// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

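// For instance (illustrative only): truncating a stream to size 0 takes the
// "most common scenario" branch and releases everything, while truncating an
// unbuffered stream down to 20 KB (> kMaxBlockSize) returns early and lets
// the write go directly to disk, since that tail can't be re-buffered.
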
bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

) {
1385 if (!CopyToLocalBuffer(index
))
1388 Addr
address(entry_
.Data()->data_addr
[index
]);
1389 entry_
.Data()->data_addr
[index
] = 0;
1391 DeleteData(address
, index
);
1393 // If we lose this entry we'll see it as zero sized.
1394 int len
= entry_
.Data()->data_size
[index
];
1395 backend_
->ModifyStorageSize(len
- unreported_size_
[index
], 0);
1396 unreported_size_
[index
] = len
;
bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);

    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = -1;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache