// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/entry_impl.h"

#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/bitmap.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_format.h"
#include "net/disk_cache/histogram_macros.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/sparse_control.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to buffer the callback from a file IO
// operation from the actual net class.
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion. Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
               const net::CompletionCallback& callback,
               net::NetLog::EventType end_event_type)
      : entry_(entry), callback_(callback), buf_(buffer),
        start_(TimeTicks::Now()), end_event_type_(end_event_type) {
    entry->AddRef();
    entry->IncrementIoCount();
  }
  virtual ~SyncCallback() {}
  virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;

  // Discards the callback without invoking it.
  void Discard();

 private:
  disk_cache::EntryImpl* entry_;
  net::CompletionCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;
  TimeTicks start_;
  const net::NetLog::EventType end_event_type_;

  DISALLOW_COPY_AND_ASSIGN(SyncCallback);
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsLoggingAllEvents()) {
      entry_->net_log().EndEvent(
          end_event_type_,
          disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied));
    }
    entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
    buf_ = NULL;  // Release the buffer before invoking the callback.
    callback_.Run(bytes_copied);
  }
  entry_->Release();
  delete this;
}

void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = NULL;
  OnFileIOComplete(0);
}

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| from the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};
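
// A minimal usage sketch of this class (illustrative only; the real calls are
// driven by InternalWriteData/InternalReadData further below):
//
//   scoped_ptr<EntryImpl::UserBuffer> buffer(new UserBuffer(backend));
//   if (buffer->PreWrite(0, len))
//     buffer->Write(0, io_buf, len);       // Data stays in memory for now.
//   int read_len = len;
//   if (buffer->PreRead(0, 0, &read_len))  // |eof| is 0: nothing on disk yet.
//     read_len = buffer->Read(0, io_buf, read_len);
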
bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}
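
// Worked example of the contract above (values illustrative): with
// offset_ == 1024, Size() == 512 and eof == 2048, a call
// PreRead(2048, 0, &len) with len == 4096 returns false and clamps |len| to
// 1024, so the caller reads bytes [0, 1024) from disk first; a call
// PreRead(2048, 1200, &len) returns true because offset 1200 falls inside
// [offset_, offset_ + Size()) and the buffer can serve the first part.
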
int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;

    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}
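
// Growth arithmetic example (values illustrative): with capacity() == 16KB
// and required == 20KB, to_add becomes max(20KB - 16KB, 64KB) == 64KB, then
// max(16KB, 64KB) == 64KB, so the buffer reserves min(16KB + 64KB, limit),
// i.e. 80KB, provided the backend allows the allocation.
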
// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
      backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
      dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  DCHECK_NE(address.value(), entry_.address().value());
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

void EntryImpl::SetDirtyFlag(int32 current_id) {
  DCHECK(node_.HasData());
  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntryV2()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheckV2())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheckV2())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheckV2()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  if (backend_.get())
    backend_->DecrementIoCount();
}

void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_.get())
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

// static
int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

  return ((key_size - key1_len) / 256 + 2);
}
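
// Worked example (assuming, for illustration, that sizeof(EntryStore) is 256
// bytes and the key field starts at byte 96, so key1_len == 160): a 100 byte
// key fits in the single base block, while a 300 byte key needs
// (300 - 160) / 256 + 2 == 2 blocks.
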
// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_.get())
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  if (background_queue_.get())
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                         const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                               const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 const CompletionCallback& callback) {
  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_.get())
    background_queue_->CancelSparseIO(this);
}

int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        if (!(ret = Flush(index, 0)))
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index, int offset,
                                 IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // offset or buf_len could be negative numbers.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}
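
// Sizing sketch (illustrative; the exact thresholds live in Addr): small
// streams map to one of the block-file types and are carved out of a shared
// block file a few blocks at a time, while anything larger than the biggest
// block allocation returns EXTERNAL from Addr::RequiredFileType() and gets a
// dedicated file through CreateExternalFile().
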
// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  if (!backend_.get())
    return NULL;

  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    scoped_refptr<File> file(new File(kKeyFileIndex == index));
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}
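
// Illustrative walk-through of the policy above: the first write to a new
// stream just creates a UserBuffer and completes in memory; a write to data
// already stored in a block file first calls MoveToLocalBuffer() so that the
// buffered copy, not the block file, is the authoritative version; and a
// write that lands within the first 16KB of data kept on an external file
// triggers CopyToLocalBuffer() so existing bytes are preserved.
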
// We get to this function with some data already stored. If there is a
// truncation that results on data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}
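
// Example of the cases above (sizes illustrative): with current_size == 10KB,
// a truncation to new_size == 0 releases the address and the buffer outright;
// a truncation to 4KB that falls inside a buffer starting at offset 0 is a
// plain Truncate(4096) on the buffer; and a truncation to anything larger
// than kMaxBlockSize with no buffer in play goes directly to disk.
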
bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}
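
// Offset arithmetic note for the block-file path above (illustrative): for an
// entry whose data starts at block 10 of a block file with 256 byte blocks,
// the buffer is written at 10 * 256 + kBlockHeaderSize bytes into that file,
// because every block file stores its own header ahead of the first block.
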
void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = 0;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache