// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/disk_cache/blockfile/entry_impl_v3.h"

#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl_v3.h"
#include "net/disk_cache/blockfile/bitmap.h"
#include "net/disk_cache/blockfile/disk_format_v3.h"
#include "net/disk_cache/blockfile/histogram_macros_v3.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/net_log_parameters.h"
// #include "net/disk_cache/blockfile/sparse_control_v3.h"
// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_
using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace
namespace disk_cache {

typedef StorageBlock<EntryRecord> CacheEntryBlockV3;
typedef StorageBlock<ShortEntryRecord> CacheShortEntryBlock;
// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImplV3::UserBuffer {
 public:
  explicit UserBuffer(BackendImplV3* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }
  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes into |buf| starting at the given |offset|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }
 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImplV3> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};
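
// Illustrative sketch (assumes a UserBuffer |buffer| and an IOBuffer |io_buf|
// holding 100 bytes; neither name exists in this file): a small write into a
// fresh buffer stays entirely in memory,
//
//   if (buffer.PreWrite(0, 100))     // fits in the reserved 16KB, so true
//     buffer.Write(0, io_buf, 100);  // copies into buffer_, no disk IO
//
// and the bytes reach disk only later, when the owning entry flushes.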
bool EntryImplV3::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}
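
// Example (illustrative): with an empty buffer, writing 1 KB at offset 64 KB
// takes the kMaxBlockSize branch and only needs GrowBuffer(1 KB, 1 MB),
// because Write() will then move offset_ to 64 KB. With 8 KB already buffered
// at offset_ == 0, writing 1 KB at offset 20 KB instead needs
// required == 21 KB, checked against the padded limit kMaxBufferSize * 6 / 5.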
void EntryImplV3::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}
void EntryImplV3::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}
bool EntryImplV3::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}
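
// Example (illustrative): with offset_ == 4096 and eof == 10000, a request
// for 8192 bytes at offset 0 is clamped to *len == 4096 and returns false:
// the caller reads those bytes from disk first and can come back to the
// buffer for the rest.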
int EntryImplV3::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file so let's fill the first part with 0.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;

    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}
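
// Example (illustrative): with offset_ == 4096 and at least 1904 buffered
// bytes, Read(0, buf, 6000) zero-fills the first 4096 bytes (clean_bytes),
// then copies the remaining 1904 bytes out of buffer_ and returns 6000.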
void EntryImplV3::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}
bool EntryImplV3::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_)
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}
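
// Example (illustrative): growing a 16 KB buffer to satisfy required == 20 KB
// computes to_add = max(4 KB, kMaxBlockSize * 4) = 64 KB, then
// to_add = max(16 KB, 64 KB) = 64 KB, so the backend is actually asked for
// min(80 KB, limit): the buffer at least doubles on every successful grow.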
// ------------------------------------------------------------------------

EntryImplV3::EntryImplV3(BackendImplV3* backend, Addr address, bool read_only)
    : backend_(backend->GetWeakPtr()),
      address_(address),
      doomed_(false),
      read_only_(read_only),
      dirty_(true) {
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}
#if defined(V3_NOT_JUST_YET_READY)
bool EntryImplV3::CreateEntry(Addr node_address, const std::string& key,
                              uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}
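
// Example (illustrative): a 4 KB key does not fit in entry_store->key, so the
// path above allocates a separate block for it, records its address in
// entry_store->long_key, and writes the key bytes plus the trailing '\0'
// through the backing File.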
uint32 EntryImplV3::GetHash() {
  return entry_.Data()->hash;
}
bool EntryImplV3::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}
void EntryImplV3::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}
// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImplV3::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}
bool EntryImplV3::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}
void EntryImplV3::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}
void EntryImplV3::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}
void EntryImplV3::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImplV3::net_log() const {
  return net_log_;
}
// ------------------------------------------------------------------------

void EntryImplV3::Doom() {
  if (background_queue_)
    background_queue_->DoomEntryImpl(this);
}

void EntryImplV3::DoomImpl() {
  if (doomed_ || !backend_)
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

void EntryImplV3::Close() {
  if (background_queue_)
    background_queue_->CloseEntryImpl(this);
}
std::string EntryImplV3::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}
Time EntryImplV3::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImplV3::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}
int32 EntryImplV3::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}
int EntryImplV3::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                          const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}
int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}
int EntryImplV3::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                           const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}
int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}
int EntryImplV3::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}
int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}
int EntryImplV3::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}
int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}
int EntryImplV3::GetAvailableRange(int64 offset, int len, int64* start,
                                   const CompletionCallback& callback) {
  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}
int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}
bool EntryImplV3::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}
void EntryImplV3::CancelSparseIO() {
  if (background_queue_)
    background_queue_->CancelSparseIO(this);
}
void EntryImplV3::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImplV3::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}
// ------------------------------------------------------------------------

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImplV3::~EntryImplV3() {
  if (!backend_) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
  } else {
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        if (!(ret = Flush(index, 0)))
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}
int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}
int EntryImpl::InternalWriteData(int index, int offset,
                                 IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // offset or buf_len could be negative numbers.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}
// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_)
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}
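
// Example (illustrative): CreateBlock() with a 200-byte size maps to a block
// file and asks the backend for the required number of blocks, while a stream
// larger than kMaxBlockSize maps to EXTERNAL and receives its own cache file
// from CreateExternalFile().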
// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_);
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index])
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}
void EntryImpl::UpdateRank(bool modified) {
  if (!backend_)
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}
void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
  }
}
// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that end up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}
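
// Example (illustrative): the first 100-byte write at offset 0 of an empty
// stream has no address yet, so PrepareTarget() just allocates a UserBuffer
// and PrepareBuffer() accepts the write; a stream already stored in a block
// file is first pulled into memory through MoveToLocalBuffer().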
// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}
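
// Example (illustrative): truncating a stream stored in an external file down
// to 100 bytes ends in ImportSeparateFile(): the stored size is reduced, the
// surviving bytes are copied into a local buffer, and the file is deleted.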
bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}
bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}
bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}
bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}
bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}
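
// Example (illustrative): closing an entry with 200 buffered bytes and no
// address yet makes Flush() allocate storage with CreateDataBlock(index, 200)
// and write the buffer at the block's offset; the buffer is then Reset() for
// reuse.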
void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}
int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}
void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}
void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_);
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}
#endif  // defined(V3_NOT_JUST_YET_READY).
void EntryImplV3::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_)
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", start);
      break;
    default:
      NOTREACHED();
  }
}
void EntryImplV3::Log(const char* msg) {
  Trace("%s 0x%p 0x%x", msg, reinterpret_cast<void*>(this), address_);
  Trace("  data: 0x%x 0x%x", entry_->data_addr[0], entry_->data_addr[1]);
  Trace("  doomed: %d", doomed_);
}
void EntryImplV3::Doom() {
  NOTIMPLEMENTED();
}

void EntryImplV3::Close() {
  NOTIMPLEMENTED();
}

std::string EntryImplV3::GetKey() const {
  return std::string();
}

Time EntryImplV3::GetLastUsed() const {
  return Time();
}

Time EntryImplV3::GetLastModified() const {
  return Time();
}

int32 EntryImplV3::GetDataSize(int index) const {
  return 0;
}

int EntryImplV3::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                          const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

int EntryImplV3::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                           const CompletionCallback& callback, bool truncate) {
  return net::ERR_FAILED;
}

int EntryImplV3::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

int EntryImplV3::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

int EntryImplV3::GetAvailableRange(int64 offset, int len, int64* start,
                                   const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

bool EntryImplV3::CouldBeSparse() const {
  return false;
}

void EntryImplV3::CancelSparseIO() {
  NOTIMPLEMENTED();
}

int EntryImplV3::ReadyForSparseIO(const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

EntryImplV3::~EntryImplV3() {
  NOTIMPLEMENTED();
}

}  // namespace disk_cache