1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
7 #include "base/bind_helpers.h"
8 #include "base/files/file.h"
9 #include "base/files/file_util.h"
10 #include "base/strings/string_number_conversions.h"
11 #include "base/strings/string_util.h"
12 #include "base/threading/platform_thread.h"
13 #include "net/base/completion_callback.h"
14 #include "net/base/io_buffer.h"
15 #include "net/base/net_errors.h"
16 #include "net/base/test_completion_callback.h"
17 #include "net/disk_cache/blockfile/backend_impl.h"
18 #include "net/disk_cache/blockfile/entry_impl.h"
19 #include "net/disk_cache/disk_cache_test_base.h"
20 #include "net/disk_cache/disk_cache_test_util.h"
21 #include "net/disk_cache/memory/mem_entry_impl.h"
22 #include "net/disk_cache/simple/simple_entry_format.h"
23 #include "net/disk_cache/simple/simple_entry_impl.h"
24 #include "net/disk_cache/simple/simple_synchronous_entry.h"
25 #include "net/disk_cache/simple/simple_test_util.h"
26 #include "net/disk_cache/simple/simple_util.h"
27 #include "testing/gtest/include/gtest/gtest.h"
30 using disk_cache::ScopedEntryPtr
;
32 // Tests that can run with different types of caches.
// Test fixture declaration for disk-cache Entry tests. NOTE(review): this
// chunk is a garbled extraction — original line numbers are fused into the
// text and several declarations from the original class are missing; the
// visible tokens are preserved byte-for-byte below.
// The helper methods declared here implement the shared test bodies that the
// TEST_F stubs later in the file dispatch to, parameterized by stream index
// where the behavior under test applies per-stream.
33 class DiskCacheEntryTest
: public DiskCacheTestWithCache
{
// Background-thread halves of the synchronous IO tests (run via
// RunTaskForTest in the corresponding *SyncIO methods below).
35 void InternalSyncIOBackground(disk_cache::Entry
* entry
);
36 void ExternalSyncIOBackground(disk_cache::Entry
* entry
);
39 void InternalSyncIO();
40 void InternalAsyncIO();
41 void ExternalSyncIO();
42 void ExternalAsyncIO();
43 void ReleaseBuffer(int stream_index
);
46 void GetTimes(int stream_index
);
47 void GrowData(int stream_index
);
48 void TruncateData(int stream_index
);
49 void ZeroLengthIO(int stream_index
);
52 void SizeChanges(int stream_index
);
53 void ReuseEntry(int size
, int stream_index
);
54 void InvalidData(int stream_index
);
55 void ReadWriteDestroyBuffer(int stream_index
);
56 void DoomNormalEntry();
57 void DoomEntryNextToOpenEntry();
58 void DoomedEntry(int stream_index
);
61 void GetAvailableRange();
// Sparse-entry test bodies.
63 void UpdateSparseEntry();
64 void DoomSparseEntry();
65 void PartialSparseEntry();
// Simple-cache-specific helpers; presumably return false on setup failure —
// TODO(review): confirm against the unabridged original file.
66 bool SimpleCacheMakeBadChecksumEntry(const std::string
& key
, int* data_size
);
67 bool SimpleCacheThirdStreamFileExists(const char* key
);
68 void SyncDoomEntry(const char* key
);
71 // This part of the test runs on the background thread.
72 void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry
* entry
) {
73 const int kSize1
= 10;
74 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
75 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
78 entry
->ReadData(0, 0, buffer1
.get(), kSize1
, net::CompletionCallback()));
79 base::strlcpy(buffer1
->data(), "the data", kSize1
);
82 0, 0, buffer1
.get(), kSize1
, net::CompletionCallback(), false));
83 memset(buffer1
->data(), 0, kSize1
);
86 entry
->ReadData(0, 0, buffer1
.get(), kSize1
, net::CompletionCallback()));
87 EXPECT_STREQ("the data", buffer1
->data());
89 const int kSize2
= 5000;
90 const int kSize3
= 10000;
91 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
92 scoped_refptr
<net::IOBuffer
> buffer3(new net::IOBuffer(kSize3
));
93 memset(buffer3
->data(), 0, kSize3
);
94 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
95 base::strlcpy(buffer2
->data(), "The really big data goes here", kSize2
);
99 1, 1500, buffer2
.get(), kSize2
, net::CompletionCallback(), false));
100 memset(buffer2
->data(), 0, kSize2
);
103 1, 1511, buffer2
.get(), kSize2
, net::CompletionCallback()));
104 EXPECT_STREQ("big data goes here", buffer2
->data());
107 entry
->ReadData(1, 0, buffer2
.get(), kSize2
, net::CompletionCallback()));
108 EXPECT_EQ(0, memcmp(buffer2
->data(), buffer3
->data(), 1500));
111 1, 5000, buffer2
.get(), kSize2
, net::CompletionCallback()));
115 1, 6500, buffer2
.get(), kSize2
, net::CompletionCallback()));
118 entry
->ReadData(1, 0, buffer3
.get(), kSize3
, net::CompletionCallback()));
121 1, 0, buffer3
.get(), 8192, net::CompletionCallback(), false));
124 entry
->ReadData(1, 0, buffer3
.get(), kSize3
, net::CompletionCallback()));
125 EXPECT_EQ(8192, entry
->GetDataSize(1));
127 // We need to delete the memory buffer on this thread.
128 EXPECT_EQ(0, entry
->WriteData(
129 0, 0, NULL
, 0, net::CompletionCallback(), true));
130 EXPECT_EQ(0, entry
->WriteData(
131 1, 0, NULL
, 0, net::CompletionCallback(), true));
134 // We need to support synchronous IO even though it is not a supported operation
135 // from the point of view of the disk cache's public interface, because we use
136 // it internally, not just by a few tests, but as part of the implementation
137 // (see sparse_control.cc, for example).
// Creates an entry and delegates the actual synchronous-IO assertions to
// InternalSyncIOBackground on the cache thread, then verifies the cache is
// empty at the end. NOTE(review): originals lines 146-151 (the tail of the
// RunTaskForTest call, entry close/doom) are missing from this extraction;
// visible tokens are preserved byte-for-byte.
138 void DiskCacheEntryTest::InternalSyncIO() {
139 disk_cache::Entry
* entry
= NULL
;
140 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
141 ASSERT_TRUE(NULL
!= entry
);
143 // The bulk of the test runs from within the callback, on the cache thread.
144 RunTaskForTest(base::Bind(&DiskCacheEntryTest::InternalSyncIOBackground
,
145 base::Unretained(this),
// Presumably the entry was doomed in the (missing) lines above, so the cache
// should report zero entries here — TODO(review): confirm with full source.
152 EXPECT_EQ(0, cache_
->GetEntryCount());
155 TEST_F(DiskCacheEntryTest
, InternalSyncIO
) {
160 TEST_F(DiskCacheEntryTest
, MemoryOnlyInternalSyncIO
) {
166 void DiskCacheEntryTest::InternalAsyncIO() {
167 disk_cache::Entry
* entry
= NULL
;
168 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
169 ASSERT_TRUE(NULL
!= entry
);
171 // Avoid using internal buffers for the test. We have to write something to
172 // the entry and close it so that we flush the internal buffer to disk. After
173 // that, IO operations will be really hitting the disk. We don't care about
174 // the content, so just extending the entry is enough (all extensions zero-
176 EXPECT_EQ(0, WriteData(entry
, 0, 15 * 1024, NULL
, 0, false));
177 EXPECT_EQ(0, WriteData(entry
, 1, 15 * 1024, NULL
, 0, false));
179 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry
));
181 MessageLoopHelper helper
;
182 // Let's verify that each IO goes to the right callback object.
183 CallbackTest
callback1(&helper
, false);
184 CallbackTest
callback2(&helper
, false);
185 CallbackTest
callback3(&helper
, false);
186 CallbackTest
callback4(&helper
, false);
187 CallbackTest
callback5(&helper
, false);
188 CallbackTest
callback6(&helper
, false);
189 CallbackTest
callback7(&helper
, false);
190 CallbackTest
callback8(&helper
, false);
191 CallbackTest
callback9(&helper
, false);
192 CallbackTest
callback10(&helper
, false);
193 CallbackTest
callback11(&helper
, false);
194 CallbackTest
callback12(&helper
, false);
195 CallbackTest
callback13(&helper
, false);
197 const int kSize1
= 10;
198 const int kSize2
= 5000;
199 const int kSize3
= 10000;
200 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
201 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
202 scoped_refptr
<net::IOBuffer
> buffer3(new net::IOBuffer(kSize3
));
203 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
204 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
205 CacheTestFillBuffer(buffer3
->data(), kSize3
, false);
213 base::Bind(&CallbackTest::Run
, base::Unretained(&callback1
))));
214 base::strlcpy(buffer1
->data(), "the data", kSize1
);
216 int ret
= entry
->WriteData(
221 base::Bind(&CallbackTest::Run
, base::Unretained(&callback2
)),
223 EXPECT_TRUE(10 == ret
|| net::ERR_IO_PENDING
== ret
);
224 if (net::ERR_IO_PENDING
== ret
)
227 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
228 memset(buffer2
->data(), 0, kSize2
);
229 ret
= entry
->ReadData(
234 base::Bind(&CallbackTest::Run
, base::Unretained(&callback3
)));
235 EXPECT_TRUE(10 == ret
|| net::ERR_IO_PENDING
== ret
);
236 if (net::ERR_IO_PENDING
== ret
)
239 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
240 EXPECT_STREQ("the data", buffer2
->data());
242 base::strlcpy(buffer2
->data(), "The really big data goes here", kSize2
);
243 ret
= entry
->WriteData(
248 base::Bind(&CallbackTest::Run
, base::Unretained(&callback4
)),
250 EXPECT_TRUE(5000 == ret
|| net::ERR_IO_PENDING
== ret
);
251 if (net::ERR_IO_PENDING
== ret
)
254 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
255 memset(buffer3
->data(), 0, kSize3
);
256 ret
= entry
->ReadData(
261 base::Bind(&CallbackTest::Run
, base::Unretained(&callback5
)));
262 EXPECT_TRUE(4989 == ret
|| net::ERR_IO_PENDING
== ret
);
263 if (net::ERR_IO_PENDING
== ret
)
266 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
267 EXPECT_STREQ("big data goes here", buffer3
->data());
268 ret
= entry
->ReadData(
273 base::Bind(&CallbackTest::Run
, base::Unretained(&callback6
)));
274 EXPECT_TRUE(5000 == ret
|| net::ERR_IO_PENDING
== ret
);
275 if (net::ERR_IO_PENDING
== ret
)
278 memset(buffer3
->data(), 0, kSize3
);
280 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
281 EXPECT_EQ(0, memcmp(buffer2
->data(), buffer3
->data(), 1500));
282 ret
= entry
->ReadData(
287 base::Bind(&CallbackTest::Run
, base::Unretained(&callback7
)));
288 EXPECT_TRUE(1500 == ret
|| net::ERR_IO_PENDING
== ret
);
289 if (net::ERR_IO_PENDING
== ret
)
292 ret
= entry
->ReadData(
297 base::Bind(&CallbackTest::Run
, base::Unretained(&callback9
)));
298 EXPECT_TRUE(6500 == ret
|| net::ERR_IO_PENDING
== ret
);
299 if (net::ERR_IO_PENDING
== ret
)
302 ret
= entry
->WriteData(
307 base::Bind(&CallbackTest::Run
, base::Unretained(&callback10
)),
309 EXPECT_TRUE(8192 == ret
|| net::ERR_IO_PENDING
== ret
);
310 if (net::ERR_IO_PENDING
== ret
)
313 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
314 ret
= entry
->ReadData(
319 base::Bind(&CallbackTest::Run
, base::Unretained(&callback11
)));
320 EXPECT_TRUE(8192 == ret
|| net::ERR_IO_PENDING
== ret
);
321 if (net::ERR_IO_PENDING
== ret
)
324 EXPECT_EQ(8192, entry
->GetDataSize(1));
326 ret
= entry
->ReadData(
331 base::Bind(&CallbackTest::Run
, base::Unretained(&callback12
)));
332 EXPECT_TRUE(10 == ret
|| net::ERR_IO_PENDING
== ret
);
333 if (net::ERR_IO_PENDING
== ret
)
336 ret
= entry
->ReadData(
341 base::Bind(&CallbackTest::Run
, base::Unretained(&callback13
)));
342 EXPECT_TRUE(5000 == ret
|| net::ERR_IO_PENDING
== ret
);
343 if (net::ERR_IO_PENDING
== ret
)
346 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
348 EXPECT_FALSE(helper
.callback_reused_error());
353 EXPECT_EQ(0, cache_
->GetEntryCount());
356 TEST_F(DiskCacheEntryTest
, InternalAsyncIO
) {
361 TEST_F(DiskCacheEntryTest
, MemoryOnlyInternalAsyncIO
) {
367 // This part of the test runs on the background thread.
368 void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry
* entry
) {
369 const int kSize1
= 17000;
370 const int kSize2
= 25000;
371 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
372 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
373 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
374 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
375 base::strlcpy(buffer1
->data(), "the data", kSize1
);
378 0, 0, buffer1
.get(), kSize1
, net::CompletionCallback(), false));
379 memset(buffer1
->data(), 0, kSize1
);
382 entry
->ReadData(0, 0, buffer1
.get(), kSize1
, net::CompletionCallback()));
383 EXPECT_STREQ("the data", buffer1
->data());
385 base::strlcpy(buffer2
->data(), "The really big data goes here", kSize2
);
389 1, 10000, buffer2
.get(), kSize2
, net::CompletionCallback(), false));
390 memset(buffer2
->data(), 0, kSize2
);
393 1, 10011, buffer2
.get(), kSize2
, net::CompletionCallback()));
394 EXPECT_STREQ("big data goes here", buffer2
->data());
397 entry
->ReadData(1, 0, buffer2
.get(), kSize2
, net::CompletionCallback()));
400 1, 30000, buffer2
.get(), kSize2
, net::CompletionCallback()));
404 1, 35000, buffer2
.get(), kSize2
, net::CompletionCallback()));
407 entry
->ReadData(1, 0, buffer1
.get(), kSize1
, net::CompletionCallback()));
411 1, 20000, buffer1
.get(), kSize1
, net::CompletionCallback(), false));
412 EXPECT_EQ(37000, entry
->GetDataSize(1));
414 // We need to delete the memory buffer on this thread.
415 EXPECT_EQ(0, entry
->WriteData(
416 0, 0, NULL
, 0, net::CompletionCallback(), true));
417 EXPECT_EQ(0, entry
->WriteData(
418 1, 0, NULL
, 0, net::CompletionCallback(), true));
421 void DiskCacheEntryTest::ExternalSyncIO() {
422 disk_cache::Entry
* entry
;
423 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
425 // The bulk of the test runs from within the callback, on the cache thread.
426 RunTaskForTest(base::Bind(&DiskCacheEntryTest::ExternalSyncIOBackground
,
427 base::Unretained(this),
433 EXPECT_EQ(0, cache_
->GetEntryCount());
436 TEST_F(DiskCacheEntryTest
, ExternalSyncIO
) {
441 TEST_F(DiskCacheEntryTest
, ExternalSyncIONoBuffer
) {
443 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
447 TEST_F(DiskCacheEntryTest
, MemoryOnlyExternalSyncIO
) {
453 void DiskCacheEntryTest::ExternalAsyncIO() {
454 disk_cache::Entry
* entry
;
455 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
459 MessageLoopHelper helper
;
460 // Let's verify that each IO goes to the right callback object.
461 CallbackTest
callback1(&helper
, false);
462 CallbackTest
callback2(&helper
, false);
463 CallbackTest
callback3(&helper
, false);
464 CallbackTest
callback4(&helper
, false);
465 CallbackTest
callback5(&helper
, false);
466 CallbackTest
callback6(&helper
, false);
467 CallbackTest
callback7(&helper
, false);
468 CallbackTest
callback8(&helper
, false);
469 CallbackTest
callback9(&helper
, false);
471 const int kSize1
= 17000;
472 const int kSize2
= 25000;
473 const int kSize3
= 25000;
474 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
475 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
476 scoped_refptr
<net::IOBuffer
> buffer3(new net::IOBuffer(kSize3
));
477 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
478 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
479 CacheTestFillBuffer(buffer3
->data(), kSize3
, false);
480 base::strlcpy(buffer1
->data(), "the data", kSize1
);
481 int ret
= entry
->WriteData(
486 base::Bind(&CallbackTest::Run
, base::Unretained(&callback1
)),
488 EXPECT_TRUE(17000 == ret
|| net::ERR_IO_PENDING
== ret
);
489 if (net::ERR_IO_PENDING
== ret
)
492 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
494 memset(buffer2
->data(), 0, kSize1
);
495 ret
= entry
->ReadData(
500 base::Bind(&CallbackTest::Run
, base::Unretained(&callback2
)));
501 EXPECT_TRUE(17000 == ret
|| net::ERR_IO_PENDING
== ret
);
502 if (net::ERR_IO_PENDING
== ret
)
505 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
506 EXPECT_STREQ("the data", buffer2
->data());
508 base::strlcpy(buffer2
->data(), "The really big data goes here", kSize2
);
509 ret
= entry
->WriteData(
514 base::Bind(&CallbackTest::Run
, base::Unretained(&callback3
)),
516 EXPECT_TRUE(25000 == ret
|| net::ERR_IO_PENDING
== ret
);
517 if (net::ERR_IO_PENDING
== ret
)
520 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
522 memset(buffer3
->data(), 0, kSize3
);
523 ret
= entry
->ReadData(
528 base::Bind(&CallbackTest::Run
, base::Unretained(&callback4
)));
529 EXPECT_TRUE(24989 == ret
|| net::ERR_IO_PENDING
== ret
);
530 if (net::ERR_IO_PENDING
== ret
)
533 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
534 EXPECT_STREQ("big data goes here", buffer3
->data());
535 ret
= entry
->ReadData(
540 base::Bind(&CallbackTest::Run
, base::Unretained(&callback5
)));
541 EXPECT_TRUE(25000 == ret
|| net::ERR_IO_PENDING
== ret
);
542 if (net::ERR_IO_PENDING
== ret
)
545 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
546 memset(buffer3
->data(), 0, kSize3
);
547 EXPECT_EQ(0, memcmp(buffer2
->data(), buffer3
->data(), 10000));
548 ret
= entry
->ReadData(
553 base::Bind(&CallbackTest::Run
, base::Unretained(&callback6
)));
554 EXPECT_TRUE(5000 == ret
|| net::ERR_IO_PENDING
== ret
);
555 if (net::ERR_IO_PENDING
== ret
)
564 base::Bind(&CallbackTest::Run
, base::Unretained(&callback7
))));
565 ret
= entry
->ReadData(
570 base::Bind(&CallbackTest::Run
, base::Unretained(&callback8
)));
571 EXPECT_TRUE(17000 == ret
|| net::ERR_IO_PENDING
== ret
);
572 if (net::ERR_IO_PENDING
== ret
)
574 ret
= entry
->WriteData(
579 base::Bind(&CallbackTest::Run
, base::Unretained(&callback9
)),
581 EXPECT_TRUE(17000 == ret
|| net::ERR_IO_PENDING
== ret
);
582 if (net::ERR_IO_PENDING
== ret
)
585 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
586 EXPECT_EQ(37000, entry
->GetDataSize(1));
588 EXPECT_FALSE(helper
.callback_reused_error());
593 EXPECT_EQ(0, cache_
->GetEntryCount());
596 TEST_F(DiskCacheEntryTest
, ExternalAsyncIO
) {
601 // TODO(ios): This test is flaky. http://crbug.com/497101
603 #define MAYBE_ExternalAsyncIONoBuffer DISABLED_ExternalAsyncIONoBuffer
605 #define MAYBE_ExternalAsyncIONoBuffer ExternalAsyncIONoBuffer
607 TEST_F(DiskCacheEntryTest
, MAYBE_ExternalAsyncIONoBuffer
) {
609 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
613 TEST_F(DiskCacheEntryTest
, MemoryOnlyExternalAsyncIO
) {
619 // Tests that IOBuffers are not referenced after IO completes.
// Verifies that the cache does not keep a reference to the caller's IOBuffer
// after a WriteData completes: ReleaseBufferCompletionCallback wraps the
// buffer and fails the test if references remain when the IO finishes.
// NOTE(review): originals 628 and 633-635 (including the closing brace and
// entry cleanup) are missing from this extraction; visible tokens are
// preserved byte-for-byte.
620 void DiskCacheEntryTest::ReleaseBuffer(int stream_index
) {
621 disk_cache::Entry
* entry
= NULL
;
622 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
623 ASSERT_TRUE(NULL
!= entry
);
625 const int kBufferSize
= 1024;
626 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kBufferSize
));
627 CacheTestFillBuffer(buffer
->data(), kBufferSize
, false);
// The callback takes the buffer and asserts sole ownership on completion.
629 net::ReleaseBufferCompletionCallback
cb(buffer
.get());
630 int rv
= entry
->WriteData(
631 stream_index
, 0, buffer
.get(), kBufferSize
, cb
.callback(), false);
// GetResult waits for async completion (or returns rv if synchronous); the
// full buffer must have been written.
632 EXPECT_EQ(kBufferSize
, cb
.GetResult(rv
));
636 TEST_F(DiskCacheEntryTest
, ReleaseBuffer
) {
638 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
642 TEST_F(DiskCacheEntryTest
, MemoryOnlyReleaseBuffer
) {
648 void DiskCacheEntryTest::StreamAccess() {
649 disk_cache::Entry
* entry
= NULL
;
650 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
651 ASSERT_TRUE(NULL
!= entry
);
653 const int kBufferSize
= 1024;
654 const int kNumStreams
= 3;
655 scoped_refptr
<net::IOBuffer
> reference_buffers
[kNumStreams
];
656 for (int i
= 0; i
< kNumStreams
; i
++) {
657 reference_buffers
[i
] = new net::IOBuffer(kBufferSize
);
658 CacheTestFillBuffer(reference_buffers
[i
]->data(), kBufferSize
, false);
660 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kBufferSize
));
661 for (int i
= 0; i
< kNumStreams
; i
++) {
664 WriteData(entry
, i
, 0, reference_buffers
[i
].get(), kBufferSize
, false));
665 memset(buffer1
->data(), 0, kBufferSize
);
666 EXPECT_EQ(kBufferSize
, ReadData(entry
, i
, 0, buffer1
.get(), kBufferSize
));
668 0, memcmp(reference_buffers
[i
]->data(), buffer1
->data(), kBufferSize
));
670 EXPECT_EQ(net::ERR_INVALID_ARGUMENT
,
671 ReadData(entry
, kNumStreams
, 0, buffer1
.get(), kBufferSize
));
674 // Open the entry and read it in chunks, including a read past the end.
675 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry
));
676 ASSERT_TRUE(NULL
!= entry
);
677 const int kReadBufferSize
= 600;
678 const int kFinalReadSize
= kBufferSize
- kReadBufferSize
;
679 static_assert(kFinalReadSize
< kReadBufferSize
,
680 "should be exactly two reads");
681 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kReadBufferSize
));
682 for (int i
= 0; i
< kNumStreams
; i
++) {
683 memset(buffer2
->data(), 0, kReadBufferSize
);
684 EXPECT_EQ(kReadBufferSize
,
685 ReadData(entry
, i
, 0, buffer2
.get(), kReadBufferSize
));
688 memcmp(reference_buffers
[i
]->data(), buffer2
->data(), kReadBufferSize
));
690 memset(buffer2
->data(), 0, kReadBufferSize
);
693 ReadData(entry
, i
, kReadBufferSize
, buffer2
.get(), kReadBufferSize
));
695 memcmp(reference_buffers
[i
]->data() + kReadBufferSize
,
703 TEST_F(DiskCacheEntryTest
, StreamAccess
) {
708 TEST_F(DiskCacheEntryTest
, MemoryOnlyStreamAccess
) {
714 void DiskCacheEntryTest::GetKey() {
715 std::string
key("the first key");
716 disk_cache::Entry
* entry
;
717 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
718 EXPECT_EQ(key
, entry
->GetKey()) << "short key";
721 int seed
= static_cast<int>(Time::Now().ToInternalValue());
723 char key_buffer
[20000];
725 CacheTestFillBuffer(key_buffer
, 3000, true);
726 key_buffer
[1000] = '\0';
729 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
730 EXPECT_TRUE(key
== entry
->GetKey()) << "1000 bytes key";
733 key_buffer
[1000] = 'p';
734 key_buffer
[3000] = '\0';
736 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
737 EXPECT_TRUE(key
== entry
->GetKey()) << "medium size key";
740 CacheTestFillBuffer(key_buffer
, sizeof(key_buffer
), true);
741 key_buffer
[19999] = '\0';
744 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
745 EXPECT_TRUE(key
== entry
->GetKey()) << "long key";
748 CacheTestFillBuffer(key_buffer
, 0x4000, true);
749 key_buffer
[0x4000] = '\0';
752 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
753 EXPECT_TRUE(key
== entry
->GetKey()) << "16KB key";
757 TEST_F(DiskCacheEntryTest
, GetKey
) {
762 TEST_F(DiskCacheEntryTest
, MemoryOnlyGetKey
) {
768 void DiskCacheEntryTest::GetTimes(int stream_index
) {
769 std::string
key("the first key");
770 disk_cache::Entry
* entry
;
772 Time t1
= Time::Now();
773 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
774 EXPECT_TRUE(entry
->GetLastModified() >= t1
);
775 EXPECT_TRUE(entry
->GetLastModified() == entry
->GetLastUsed());
778 Time t2
= Time::Now();
779 EXPECT_TRUE(t2
> t1
);
780 EXPECT_EQ(0, WriteData(entry
, stream_index
, 200, NULL
, 0, false));
781 if (type_
== net::APP_CACHE
) {
782 EXPECT_TRUE(entry
->GetLastModified() < t2
);
784 EXPECT_TRUE(entry
->GetLastModified() >= t2
);
786 EXPECT_TRUE(entry
->GetLastModified() == entry
->GetLastUsed());
789 Time t3
= Time::Now();
790 EXPECT_TRUE(t3
> t2
);
791 const int kSize
= 200;
792 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
793 EXPECT_EQ(kSize
, ReadData(entry
, stream_index
, 0, buffer
.get(), kSize
));
794 if (type_
== net::APP_CACHE
) {
795 EXPECT_TRUE(entry
->GetLastUsed() < t2
);
796 EXPECT_TRUE(entry
->GetLastModified() < t2
);
797 } else if (type_
== net::SHADER_CACHE
) {
798 EXPECT_TRUE(entry
->GetLastUsed() < t3
);
799 EXPECT_TRUE(entry
->GetLastModified() < t3
);
801 EXPECT_TRUE(entry
->GetLastUsed() >= t3
);
802 EXPECT_TRUE(entry
->GetLastModified() < t3
);
807 TEST_F(DiskCacheEntryTest
, GetTimes
) {
812 TEST_F(DiskCacheEntryTest
, MemoryOnlyGetTimes
) {
818 TEST_F(DiskCacheEntryTest
, AppCacheGetTimes
) {
819 SetCacheType(net::APP_CACHE
);
824 TEST_F(DiskCacheEntryTest
, ShaderCacheGetTimes
) {
825 SetCacheType(net::SHADER_CACHE
);
830 void DiskCacheEntryTest::GrowData(int stream_index
) {
831 std::string
key1("the first key");
832 disk_cache::Entry
* entry
;
833 ASSERT_EQ(net::OK
, CreateEntry(key1
, &entry
));
835 const int kSize
= 20000;
836 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
837 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
838 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
839 memset(buffer2
->data(), 0, kSize
);
841 base::strlcpy(buffer1
->data(), "the data", kSize
);
842 EXPECT_EQ(10, WriteData(entry
, stream_index
, 0, buffer1
.get(), 10, false));
843 EXPECT_EQ(10, ReadData(entry
, stream_index
, 0, buffer2
.get(), 10));
844 EXPECT_STREQ("the data", buffer2
->data());
845 EXPECT_EQ(10, entry
->GetDataSize(stream_index
));
848 WriteData(entry
, stream_index
, 0, buffer1
.get(), 2000, false));
849 EXPECT_EQ(2000, entry
->GetDataSize(stream_index
));
850 EXPECT_EQ(2000, ReadData(entry
, stream_index
, 0, buffer2
.get(), 2000));
851 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 2000));
854 WriteData(entry
, stream_index
, 0, buffer1
.get(), kSize
, false));
855 EXPECT_EQ(20000, entry
->GetDataSize(stream_index
));
856 EXPECT_EQ(20000, ReadData(entry
, stream_index
, 0, buffer2
.get(), kSize
));
857 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), kSize
));
860 memset(buffer2
->data(), 0, kSize
);
861 std::string
key2("Second key");
862 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry
));
863 EXPECT_EQ(10, WriteData(entry
, stream_index
, 0, buffer1
.get(), 10, false));
864 EXPECT_EQ(10, entry
->GetDataSize(stream_index
));
867 // Go from an internal address to a bigger block size.
868 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
870 WriteData(entry
, stream_index
, 0, buffer1
.get(), 2000, false));
871 EXPECT_EQ(2000, entry
->GetDataSize(stream_index
));
872 EXPECT_EQ(2000, ReadData(entry
, stream_index
, 0, buffer2
.get(), 2000));
873 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 2000));
875 memset(buffer2
->data(), 0, kSize
);
877 // Go from an internal address to an external one.
878 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
880 WriteData(entry
, stream_index
, 0, buffer1
.get(), kSize
, false));
881 EXPECT_EQ(20000, entry
->GetDataSize(stream_index
));
882 EXPECT_EQ(20000, ReadData(entry
, stream_index
, 0, buffer2
.get(), kSize
));
883 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), kSize
));
886 // Double check the size from disk.
887 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
888 EXPECT_EQ(20000, entry
->GetDataSize(stream_index
));
890 // Now extend the entry without actual data.
891 EXPECT_EQ(0, WriteData(entry
, stream_index
, 45500, buffer1
.get(), 0, false));
894 // And check again from disk.
895 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
896 EXPECT_EQ(45500, entry
->GetDataSize(stream_index
));
900 TEST_F(DiskCacheEntryTest
, GrowData
) {
905 TEST_F(DiskCacheEntryTest
, GrowDataNoBuffer
) {
907 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
911 TEST_F(DiskCacheEntryTest
, MemoryOnlyGrowData
) {
917 void DiskCacheEntryTest::TruncateData(int stream_index
) {
918 std::string
key("the first key");
919 disk_cache::Entry
* entry
;
920 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
922 const int kSize1
= 20000;
923 const int kSize2
= 20000;
924 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
925 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
927 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
928 memset(buffer2
->data(), 0, kSize2
);
930 // Simple truncation:
931 EXPECT_EQ(200, WriteData(entry
, stream_index
, 0, buffer1
.get(), 200, false));
932 EXPECT_EQ(200, entry
->GetDataSize(stream_index
));
933 EXPECT_EQ(100, WriteData(entry
, stream_index
, 0, buffer1
.get(), 100, false));
934 EXPECT_EQ(200, entry
->GetDataSize(stream_index
));
935 EXPECT_EQ(100, WriteData(entry
, stream_index
, 0, buffer1
.get(), 100, true));
936 EXPECT_EQ(100, entry
->GetDataSize(stream_index
));
937 EXPECT_EQ(0, WriteData(entry
, stream_index
, 50, buffer1
.get(), 0, true));
938 EXPECT_EQ(50, entry
->GetDataSize(stream_index
));
939 EXPECT_EQ(0, WriteData(entry
, stream_index
, 0, buffer1
.get(), 0, true));
940 EXPECT_EQ(0, entry
->GetDataSize(stream_index
));
942 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
944 // Go to an external file.
946 WriteData(entry
, stream_index
, 0, buffer1
.get(), 20000, true));
947 EXPECT_EQ(20000, entry
->GetDataSize(stream_index
));
948 EXPECT_EQ(20000, ReadData(entry
, stream_index
, 0, buffer2
.get(), 20000));
949 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 20000));
950 memset(buffer2
->data(), 0, kSize2
);
952 // External file truncation
954 WriteData(entry
, stream_index
, 0, buffer1
.get(), 18000, false));
955 EXPECT_EQ(20000, entry
->GetDataSize(stream_index
));
957 WriteData(entry
, stream_index
, 0, buffer1
.get(), 18000, true));
958 EXPECT_EQ(18000, entry
->GetDataSize(stream_index
));
959 EXPECT_EQ(0, WriteData(entry
, stream_index
, 17500, buffer1
.get(), 0, true));
960 EXPECT_EQ(17500, entry
->GetDataSize(stream_index
));
962 // And back to an internal block.
964 WriteData(entry
, stream_index
, 1000, buffer1
.get(), 600, true));
965 EXPECT_EQ(1600, entry
->GetDataSize(stream_index
));
966 EXPECT_EQ(600, ReadData(entry
, stream_index
, 1000, buffer2
.get(), 600));
967 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 600));
968 EXPECT_EQ(1000, ReadData(entry
, stream_index
, 0, buffer2
.get(), 1000));
969 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 1000))
970 << "Preserves previous data";
972 // Go from external file to zero length.
974 WriteData(entry
, stream_index
, 0, buffer1
.get(), 20000, true));
975 EXPECT_EQ(20000, entry
->GetDataSize(stream_index
));
976 EXPECT_EQ(0, WriteData(entry
, stream_index
, 0, buffer1
.get(), 0, true));
977 EXPECT_EQ(0, entry
->GetDataSize(stream_index
));
982 TEST_F(DiskCacheEntryTest
, TruncateData
) {
987 TEST_F(DiskCacheEntryTest
, TruncateDataNoBuffer
) {
989 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
993 TEST_F(DiskCacheEntryTest
, MemoryOnlyTruncateData
) {
999 void DiskCacheEntryTest::ZeroLengthIO(int stream_index
) {
1000 std::string
key("the first key");
1001 disk_cache::Entry
* entry
;
1002 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1004 EXPECT_EQ(0, ReadData(entry
, stream_index
, 0, NULL
, 0));
1005 EXPECT_EQ(0, WriteData(entry
, stream_index
, 0, NULL
, 0, false));
1007 // This write should extend the entry.
1008 EXPECT_EQ(0, WriteData(entry
, stream_index
, 1000, NULL
, 0, false));
1009 EXPECT_EQ(0, ReadData(entry
, stream_index
, 500, NULL
, 0));
1010 EXPECT_EQ(0, ReadData(entry
, stream_index
, 2000, NULL
, 0));
1011 EXPECT_EQ(1000, entry
->GetDataSize(stream_index
));
1013 EXPECT_EQ(0, WriteData(entry
, stream_index
, 100000, NULL
, 0, true));
1014 EXPECT_EQ(0, ReadData(entry
, stream_index
, 50000, NULL
, 0));
1015 EXPECT_EQ(100000, entry
->GetDataSize(stream_index
));
1017 // Let's verify the actual content.
1018 const int kSize
= 20;
1019 const char zeros
[kSize
] = {};
1020 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1022 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1023 EXPECT_EQ(kSize
, ReadData(entry
, stream_index
, 500, buffer
.get(), kSize
));
1024 EXPECT_TRUE(!memcmp(buffer
->data(), zeros
, kSize
));
1026 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1027 EXPECT_EQ(kSize
, ReadData(entry
, stream_index
, 5000, buffer
.get(), kSize
));
1028 EXPECT_TRUE(!memcmp(buffer
->data(), zeros
, kSize
));
1030 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1031 EXPECT_EQ(kSize
, ReadData(entry
, stream_index
, 50000, buffer
.get(), kSize
));
1032 EXPECT_TRUE(!memcmp(buffer
->data(), zeros
, kSize
));
1037 TEST_F(DiskCacheEntryTest
, ZeroLengthIO
) {
1042 TEST_F(DiskCacheEntryTest
, ZeroLengthIONoBuffer
) {
1044 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
1048 TEST_F(DiskCacheEntryTest
, MemoryOnlyZeroLengthIO
) {
1049 SetMemoryOnlyMode();
1054 // Tests that we handle the content correctly when buffering, a feature of the
1055 // standard cache that permits fast responses to certain reads.
1056 void DiskCacheEntryTest::Buffering() {
1057 std::string
key("the first key");
1058 disk_cache::Entry
* entry
;
1059 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1061 const int kSize
= 200;
1062 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
1063 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
1064 CacheTestFillBuffer(buffer1
->data(), kSize
, true);
1065 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1067 EXPECT_EQ(kSize
, WriteData(entry
, 1, 0, buffer1
.get(), kSize
, false));
1070 // Write a little more and read what we wrote before.
1071 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1072 EXPECT_EQ(kSize
, WriteData(entry
, 1, 5000, buffer1
.get(), kSize
, false));
1073 EXPECT_EQ(kSize
, ReadData(entry
, 1, 0, buffer2
.get(), kSize
));
1074 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1076 // Now go to an external file.
1077 EXPECT_EQ(kSize
, WriteData(entry
, 1, 18000, buffer1
.get(), kSize
, false));
1080 // Write something else and verify old data.
1081 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1082 EXPECT_EQ(kSize
, WriteData(entry
, 1, 10000, buffer1
.get(), kSize
, false));
1083 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1084 EXPECT_EQ(kSize
, ReadData(entry
, 1, 5000, buffer2
.get(), kSize
));
1085 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1086 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1087 EXPECT_EQ(kSize
, ReadData(entry
, 1, 0, buffer2
.get(), kSize
));
1088 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1089 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1090 EXPECT_EQ(kSize
, ReadData(entry
, 1, 18000, buffer2
.get(), kSize
));
1091 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1093 // Extend the file some more.
1094 EXPECT_EQ(kSize
, WriteData(entry
, 1, 23000, buffer1
.get(), kSize
, false));
1097 // And now make sure that we can deal with data in both places (ram/disk).
1098 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1099 EXPECT_EQ(kSize
, WriteData(entry
, 1, 17000, buffer1
.get(), kSize
, false));
1101 // We should not overwrite the data at 18000 with this.
1102 EXPECT_EQ(kSize
, WriteData(entry
, 1, 19000, buffer1
.get(), kSize
, false));
1103 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1104 EXPECT_EQ(kSize
, ReadData(entry
, 1, 18000, buffer2
.get(), kSize
));
1105 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1106 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1107 EXPECT_EQ(kSize
, ReadData(entry
, 1, 17000, buffer2
.get(), kSize
));
1108 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1110 EXPECT_EQ(kSize
, WriteData(entry
, 1, 22900, buffer1
.get(), kSize
, false));
1111 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1112 EXPECT_EQ(100, ReadData(entry
, 1, 23000, buffer2
.get(), kSize
));
1113 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data() + 100, 100));
1115 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1116 EXPECT_EQ(100, ReadData(entry
, 1, 23100, buffer2
.get(), kSize
));
1117 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data() + 100, 100));
1119 // Extend the file again and read before without closing the entry.
1120 EXPECT_EQ(kSize
, WriteData(entry
, 1, 25000, buffer1
.get(), kSize
, false));
1121 EXPECT_EQ(kSize
, WriteData(entry
, 1, 45000, buffer1
.get(), kSize
, false));
1122 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1123 EXPECT_EQ(kSize
, ReadData(entry
, 1, 25000, buffer2
.get(), kSize
));
1124 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1125 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1126 EXPECT_EQ(kSize
, ReadData(entry
, 1, 45000, buffer2
.get(), kSize
));
1127 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1132 TEST_F(DiskCacheEntryTest
, Buffering
) {
1137 TEST_F(DiskCacheEntryTest
, BufferingNoBuffer
) {
1139 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
1143 // Checks that entries are zero length when created.
1144 void DiskCacheEntryTest::SizeAtCreate() {
1145 const char key
[] = "the first key";
1146 disk_cache::Entry
* entry
;
1147 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1149 const int kNumStreams
= 3;
1150 for (int i
= 0; i
< kNumStreams
; ++i
)
1151 EXPECT_EQ(0, entry
->GetDataSize(i
));
1155 TEST_F(DiskCacheEntryTest
, SizeAtCreate
) {
1160 TEST_F(DiskCacheEntryTest
, MemoryOnlySizeAtCreate
) {
1161 SetMemoryOnlyMode();
1166 // Some extra tests to make sure that buffering works properly when changing
1168 void DiskCacheEntryTest::SizeChanges(int stream_index
) {
1169 std::string
key("the first key");
1170 disk_cache::Entry
* entry
;
1171 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1173 const int kSize
= 200;
1174 const char zeros
[kSize
] = {};
1175 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
1176 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
1177 CacheTestFillBuffer(buffer1
->data(), kSize
, true);
1178 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1181 WriteData(entry
, stream_index
, 0, buffer1
.get(), kSize
, true));
1183 WriteData(entry
, stream_index
, 17000, buffer1
.get(), kSize
, true));
1185 WriteData(entry
, stream_index
, 23000, buffer1
.get(), kSize
, true));
1188 // Extend the file and read between the old size and the new write.
1189 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1190 EXPECT_EQ(23000 + kSize
, entry
->GetDataSize(stream_index
));
1192 WriteData(entry
, stream_index
, 25000, buffer1
.get(), kSize
, true));
1193 EXPECT_EQ(25000 + kSize
, entry
->GetDataSize(stream_index
));
1194 EXPECT_EQ(kSize
, ReadData(entry
, stream_index
, 24000, buffer2
.get(), kSize
));
1195 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, kSize
));
1197 // Read at the end of the old file size.
1200 ReadData(entry
, stream_index
, 23000 + kSize
- 35, buffer2
.get(), kSize
));
1201 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data() + kSize
- 35, 35));
1203 // Read slightly before the last write.
1204 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1205 EXPECT_EQ(kSize
, ReadData(entry
, stream_index
, 24900, buffer2
.get(), kSize
));
1206 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, 100));
1207 EXPECT_TRUE(!memcmp(buffer2
->data() + 100, buffer1
->data(), kSize
- 100));
1209 // Extend the entry a little more.
1211 WriteData(entry
, stream_index
, 26000, buffer1
.get(), kSize
, true));
1212 EXPECT_EQ(26000 + kSize
, entry
->GetDataSize(stream_index
));
1213 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1214 EXPECT_EQ(kSize
, ReadData(entry
, stream_index
, 25900, buffer2
.get(), kSize
));
1215 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, 100));
1216 EXPECT_TRUE(!memcmp(buffer2
->data() + 100, buffer1
->data(), kSize
- 100));
1218 // And now reduce the size.
1220 WriteData(entry
, stream_index
, 25000, buffer1
.get(), kSize
, true));
1221 EXPECT_EQ(25000 + kSize
, entry
->GetDataSize(stream_index
));
1224 ReadData(entry
, stream_index
, 25000 + kSize
- 28, buffer2
.get(), kSize
));
1225 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data() + kSize
- 28, 28));
1227 // Reduce the size with a buffer that is not extending the size.
1229 WriteData(entry
, stream_index
, 24000, buffer1
.get(), kSize
, false));
1230 EXPECT_EQ(25000 + kSize
, entry
->GetDataSize(stream_index
));
1232 WriteData(entry
, stream_index
, 24500, buffer1
.get(), kSize
, true));
1233 EXPECT_EQ(24500 + kSize
, entry
->GetDataSize(stream_index
));
1234 EXPECT_EQ(kSize
, ReadData(entry
, stream_index
, 23900, buffer2
.get(), kSize
));
1235 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, 100));
1236 EXPECT_TRUE(!memcmp(buffer2
->data() + 100, buffer1
->data(), kSize
- 100));
1238 // And now reduce the size below the old size.
1240 WriteData(entry
, stream_index
, 19000, buffer1
.get(), kSize
, true));
1241 EXPECT_EQ(19000 + kSize
, entry
->GetDataSize(stream_index
));
1242 EXPECT_EQ(kSize
, ReadData(entry
, stream_index
, 18900, buffer2
.get(), kSize
));
1243 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, 100));
1244 EXPECT_TRUE(!memcmp(buffer2
->data() + 100, buffer1
->data(), kSize
- 100));
1246 // Verify that the actual file is truncated.
1248 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1249 EXPECT_EQ(19000 + kSize
, entry
->GetDataSize(stream_index
));
1251 // Extend the newly opened file with a zero length write, expect zero fill.
1254 WriteData(entry
, stream_index
, 20000 + kSize
, buffer1
.get(), 0, false));
1256 ReadData(entry
, stream_index
, 19000 + kSize
, buffer1
.get(), kSize
));
1257 EXPECT_EQ(0, memcmp(buffer1
->data(), zeros
, kSize
));
1262 TEST_F(DiskCacheEntryTest
, SizeChanges
) {
1267 TEST_F(DiskCacheEntryTest
, SizeChangesNoBuffer
) {
1269 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
1273 // Write more than the total cache capacity but to a single entry. |size| is the
1274 // amount of bytes to write each time.
1275 void DiskCacheEntryTest::ReuseEntry(int size
, int stream_index
) {
1276 std::string
key1("the first key");
1277 disk_cache::Entry
* entry
;
1278 ASSERT_EQ(net::OK
, CreateEntry(key1
, &entry
));
1281 std::string
key2("the second key");
1282 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry
));
1284 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(size
));
1285 CacheTestFillBuffer(buffer
->data(), size
, false);
1287 for (int i
= 0; i
< 15; i
++) {
1288 EXPECT_EQ(0, WriteData(entry
, stream_index
, 0, buffer
.get(), 0, true));
1290 WriteData(entry
, stream_index
, 0, buffer
.get(), size
, false));
1292 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
1296 ASSERT_EQ(net::OK
, OpenEntry(key1
, &entry
)) << "have not evicted this entry";
1300 TEST_F(DiskCacheEntryTest
, ReuseExternalEntry
) {
1301 SetMaxSize(200 * 1024);
1303 ReuseEntry(20 * 1024, 0);
1306 TEST_F(DiskCacheEntryTest
, MemoryOnlyReuseExternalEntry
) {
1307 SetMemoryOnlyMode();
1308 SetMaxSize(200 * 1024);
1310 ReuseEntry(20 * 1024, 0);
1313 TEST_F(DiskCacheEntryTest
, ReuseInternalEntry
) {
1314 SetMaxSize(100 * 1024);
1316 ReuseEntry(10 * 1024, 0);
1319 TEST_F(DiskCacheEntryTest
, MemoryOnlyReuseInternalEntry
) {
1320 SetMemoryOnlyMode();
1321 SetMaxSize(100 * 1024);
1323 ReuseEntry(10 * 1024, 0);
1326 // Reading somewhere that was not written should return zeros.
1327 void DiskCacheEntryTest::InvalidData(int stream_index
) {
1328 std::string
key("the first key");
1329 disk_cache::Entry
* entry
;
1330 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1332 const int kSize1
= 20000;
1333 const int kSize2
= 20000;
1334 const int kSize3
= 20000;
1335 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
1336 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
1337 scoped_refptr
<net::IOBuffer
> buffer3(new net::IOBuffer(kSize3
));
1339 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
1340 memset(buffer2
->data(), 0, kSize2
);
1342 // Simple data grow:
1344 WriteData(entry
, stream_index
, 400, buffer1
.get(), 200, false));
1345 EXPECT_EQ(600, entry
->GetDataSize(stream_index
));
1346 EXPECT_EQ(100, ReadData(entry
, stream_index
, 300, buffer3
.get(), 100));
1347 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 100));
1349 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1351 // The entry is now on disk. Load it and extend it.
1353 WriteData(entry
, stream_index
, 800, buffer1
.get(), 200, false));
1354 EXPECT_EQ(1000, entry
->GetDataSize(stream_index
));
1355 EXPECT_EQ(100, ReadData(entry
, stream_index
, 700, buffer3
.get(), 100));
1356 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 100));
1358 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1360 // This time using truncate.
1362 WriteData(entry
, stream_index
, 1800, buffer1
.get(), 200, true));
1363 EXPECT_EQ(2000, entry
->GetDataSize(stream_index
));
1364 EXPECT_EQ(100, ReadData(entry
, stream_index
, 1500, buffer3
.get(), 100));
1365 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 100));
1367 // Go to an external file.
1369 WriteData(entry
, stream_index
, 19800, buffer1
.get(), 200, false));
1370 EXPECT_EQ(20000, entry
->GetDataSize(stream_index
));
1371 EXPECT_EQ(4000, ReadData(entry
, stream_index
, 14000, buffer3
.get(), 4000));
1372 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 4000));
1374 // And back to an internal block.
1376 WriteData(entry
, stream_index
, 1000, buffer1
.get(), 600, true));
1377 EXPECT_EQ(1600, entry
->GetDataSize(stream_index
));
1378 EXPECT_EQ(600, ReadData(entry
, stream_index
, 1000, buffer3
.get(), 600));
1379 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer1
->data(), 600));
1383 WriteData(entry
, stream_index
, 2000, buffer1
.get(), 600, false));
1384 EXPECT_EQ(2600, entry
->GetDataSize(stream_index
));
1385 EXPECT_EQ(200, ReadData(entry
, stream_index
, 1800, buffer3
.get(), 200));
1386 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 200));
1388 // And again (with truncation flag).
1390 WriteData(entry
, stream_index
, 3000, buffer1
.get(), 600, true));
1391 EXPECT_EQ(3600, entry
->GetDataSize(stream_index
));
1392 EXPECT_EQ(200, ReadData(entry
, stream_index
, 2800, buffer3
.get(), 200));
1393 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 200));
1398 TEST_F(DiskCacheEntryTest
, InvalidData
) {
1403 TEST_F(DiskCacheEntryTest
, InvalidDataNoBuffer
) {
1405 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
1409 TEST_F(DiskCacheEntryTest
, MemoryOnlyInvalidData
) {
1410 SetMemoryOnlyMode();
1415 // Tests that the cache preserves the buffer of an IO operation.
1416 void DiskCacheEntryTest::ReadWriteDestroyBuffer(int stream_index
) {
1417 std::string
key("the first key");
1418 disk_cache::Entry
* entry
;
1419 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1421 const int kSize
= 200;
1422 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1423 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1425 net::TestCompletionCallback cb
;
1426 EXPECT_EQ(net::ERR_IO_PENDING
,
1428 stream_index
, 0, buffer
.get(), kSize
, cb
.callback(), false));
1430 // Release our reference to the buffer.
1432 EXPECT_EQ(kSize
, cb
.WaitForResult());
1434 // And now test with a Read().
1435 buffer
= new net::IOBuffer(kSize
);
1436 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1439 net::ERR_IO_PENDING
,
1440 entry
->ReadData(stream_index
, 0, buffer
.get(), kSize
, cb
.callback()));
1442 EXPECT_EQ(kSize
, cb
.WaitForResult());
1447 TEST_F(DiskCacheEntryTest
, ReadWriteDestroyBuffer
) {
1449 ReadWriteDestroyBuffer(0);
1452 void DiskCacheEntryTest::DoomNormalEntry() {
1453 std::string
key("the first key");
1454 disk_cache::Entry
* entry
;
1455 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1459 const int kSize
= 20000;
1460 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1461 CacheTestFillBuffer(buffer
->data(), kSize
, true);
1462 buffer
->data()[19999] = '\0';
1464 key
= buffer
->data();
1465 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1466 EXPECT_EQ(20000, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1467 EXPECT_EQ(20000, WriteData(entry
, 1, 0, buffer
.get(), kSize
, false));
1471 FlushQueueForTest();
1472 EXPECT_EQ(0, cache_
->GetEntryCount());
1475 TEST_F(DiskCacheEntryTest
, DoomEntry
) {
1480 TEST_F(DiskCacheEntryTest
, MemoryOnlyDoomEntry
) {
1481 SetMemoryOnlyMode();
1486 // Tests dooming an entry that's linked to an open entry.
1487 void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
1488 disk_cache::Entry
* entry1
;
1489 disk_cache::Entry
* entry2
;
1490 ASSERT_EQ(net::OK
, CreateEntry("fixed", &entry1
));
1492 ASSERT_EQ(net::OK
, CreateEntry("foo", &entry1
));
1494 ASSERT_EQ(net::OK
, CreateEntry("bar", &entry1
));
1497 ASSERT_EQ(net::OK
, OpenEntry("foo", &entry1
));
1498 ASSERT_EQ(net::OK
, OpenEntry("bar", &entry2
));
1502 ASSERT_EQ(net::OK
, OpenEntry("foo", &entry2
));
1507 ASSERT_EQ(net::OK
, OpenEntry("fixed", &entry1
));
1511 TEST_F(DiskCacheEntryTest
, DoomEntryNextToOpenEntry
) {
1513 DoomEntryNextToOpenEntry();
1516 TEST_F(DiskCacheEntryTest
, NewEvictionDoomEntryNextToOpenEntry
) {
1519 DoomEntryNextToOpenEntry();
1522 TEST_F(DiskCacheEntryTest
, AppCacheDoomEntryNextToOpenEntry
) {
1523 SetCacheType(net::APP_CACHE
);
1525 DoomEntryNextToOpenEntry();
1528 // Verify that basic operations work as expected with doomed entries.
1529 void DiskCacheEntryTest::DoomedEntry(int stream_index
) {
1530 std::string
key("the first key");
1531 disk_cache::Entry
* entry
;
1532 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1535 FlushQueueForTest();
1536 EXPECT_EQ(0, cache_
->GetEntryCount());
1537 Time initial
= Time::Now();
1540 const int kSize1
= 2000;
1541 const int kSize2
= 2000;
1542 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
1543 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
1544 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
1545 memset(buffer2
->data(), 0, kSize2
);
1548 WriteData(entry
, stream_index
, 0, buffer1
.get(), 2000, false));
1549 EXPECT_EQ(2000, ReadData(entry
, stream_index
, 0, buffer2
.get(), 2000));
1550 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer2
->data(), kSize1
));
1551 EXPECT_EQ(key
, entry
->GetKey());
1552 EXPECT_TRUE(initial
< entry
->GetLastModified());
1553 EXPECT_TRUE(initial
< entry
->GetLastUsed());
1558 TEST_F(DiskCacheEntryTest
, DoomedEntry
) {
1563 TEST_F(DiskCacheEntryTest
, MemoryOnlyDoomedEntry
) {
1564 SetMemoryOnlyMode();
1569 // Tests that we discard entries if the data is missing.
1570 TEST_F(DiskCacheEntryTest
, MissingData
) {
1573 std::string
key("the first key");
1574 disk_cache::Entry
* entry
;
1575 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1577 // Write to an external file.
1578 const int kSize
= 20000;
1579 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1580 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1581 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1583 FlushQueueForTest();
1585 disk_cache::Addr
address(0x80000001);
1586 base::FilePath name
= cache_impl_
->GetFileName(address
);
1587 EXPECT_TRUE(base::DeleteFile(name
, false));
1589 // Attempt to read the data.
1590 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1591 EXPECT_EQ(net::ERR_FILE_NOT_FOUND
,
1592 ReadData(entry
, 0, 0, buffer
.get(), kSize
));
1595 // The entry should be gone.
1596 ASSERT_NE(net::OK
, OpenEntry(key
, &entry
));
1599 // Test that child entries in a memory cache backend are not visible from
1601 TEST_F(DiskCacheEntryTest
, MemoryOnlyEnumerationWithSparseEntries
) {
1602 SetMemoryOnlyMode();
1605 const int kSize
= 4096;
1606 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
1607 CacheTestFillBuffer(buf
->data(), kSize
, false);
1609 std::string
key("the first key");
1610 disk_cache::Entry
* parent_entry
;
1611 ASSERT_EQ(net::OK
, CreateEntry(key
, &parent_entry
));
1613 // Writes to the parent entry.
1615 parent_entry
->WriteSparseData(
1616 0, buf
.get(), kSize
, net::CompletionCallback()));
1618 // This write creates a child entry and writes to it.
1620 parent_entry
->WriteSparseData(
1621 8192, buf
.get(), kSize
, net::CompletionCallback()));
1623 parent_entry
->Close();
1625 // Perform the enumerations.
1626 scoped_ptr
<TestIterator
> iter
= CreateIterator();
1627 disk_cache::Entry
* entry
= NULL
;
1629 while (iter
->OpenNextEntry(&entry
) == net::OK
) {
1630 ASSERT_TRUE(entry
!= NULL
);
1632 disk_cache::MemEntryImpl
* mem_entry
=
1633 reinterpret_cast<disk_cache::MemEntryImpl
*>(entry
);
1634 EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry
, mem_entry
->type());
1637 EXPECT_EQ(1, count
);
1640 // Writes |buf_1| to offset and reads it back as |buf_2|.
1641 void VerifySparseIO(disk_cache::Entry
* entry
, int64 offset
,
1642 net::IOBuffer
* buf_1
, int size
, net::IOBuffer
* buf_2
) {
1643 net::TestCompletionCallback cb
;
1645 memset(buf_2
->data(), 0, size
);
1646 int ret
= entry
->ReadSparseData(offset
, buf_2
, size
, cb
.callback());
1647 EXPECT_EQ(0, cb
.GetResult(ret
));
1649 ret
= entry
->WriteSparseData(offset
, buf_1
, size
, cb
.callback());
1650 EXPECT_EQ(size
, cb
.GetResult(ret
));
1652 ret
= entry
->ReadSparseData(offset
, buf_2
, size
, cb
.callback());
1653 EXPECT_EQ(size
, cb
.GetResult(ret
));
1655 EXPECT_EQ(0, memcmp(buf_1
->data(), buf_2
->data(), size
));
1658 // Reads |size| bytes from |entry| at |offset| and verifies that they are the
1659 // same as the content of the provided |buffer|.
1660 void VerifyContentSparseIO(disk_cache::Entry
* entry
, int64 offset
, char* buffer
,
1662 net::TestCompletionCallback cb
;
1664 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(size
));
1665 memset(buf_1
->data(), 0, size
);
1666 int ret
= entry
->ReadSparseData(offset
, buf_1
.get(), size
, cb
.callback());
1667 EXPECT_EQ(size
, cb
.GetResult(ret
));
1668 EXPECT_EQ(0, memcmp(buf_1
->data(), buffer
, size
));
1671 void DiskCacheEntryTest::BasicSparseIO() {
1672 std::string
key("the first key");
1673 disk_cache::Entry
* entry
;
1674 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1676 const int kSize
= 2048;
1677 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
1678 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
1679 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
1681 // Write at offset 0.
1682 VerifySparseIO(entry
, 0, buf_1
.get(), kSize
, buf_2
.get());
1684 // Write at offset 0x400000 (4 MB).
1685 VerifySparseIO(entry
, 0x400000, buf_1
.get(), kSize
, buf_2
.get());
1687 // Write at offset 0x800000000 (32 GB).
1688 VerifySparseIO(entry
, 0x800000000LL
, buf_1
.get(), kSize
, buf_2
.get());
1692 // Check everything again.
1693 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1694 VerifyContentSparseIO(entry
, 0, buf_1
->data(), kSize
);
1695 VerifyContentSparseIO(entry
, 0x400000, buf_1
->data(), kSize
);
1696 VerifyContentSparseIO(entry
, 0x800000000LL
, buf_1
->data(), kSize
);
1700 TEST_F(DiskCacheEntryTest
, BasicSparseIO
) {
1705 TEST_F(DiskCacheEntryTest
, MemoryOnlyBasicSparseIO
) {
1706 SetMemoryOnlyMode();
1711 void DiskCacheEntryTest::HugeSparseIO() {
1712 std::string
key("the first key");
1713 disk_cache::Entry
* entry
;
1714 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1716 // Write 1.2 MB so that we cover multiple entries.
1717 const int kSize
= 1200 * 1024;
1718 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
1719 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
1720 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
1722 // Write at offset 0x20F0000 (33 MB - 64 KB).
1723 VerifySparseIO(entry
, 0x20F0000, buf_1
.get(), kSize
, buf_2
.get());
1727 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1728 VerifyContentSparseIO(entry
, 0x20F0000, buf_1
->data(), kSize
);
1732 TEST_F(DiskCacheEntryTest
, HugeSparseIO
) {
1737 TEST_F(DiskCacheEntryTest
, MemoryOnlyHugeSparseIO
) {
1738 SetMemoryOnlyMode();
1743 void DiskCacheEntryTest::GetAvailableRange() {
1744 std::string
key("the first key");
1745 disk_cache::Entry
* entry
;
1746 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1748 const int kSize
= 16 * 1024;
1749 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
1750 CacheTestFillBuffer(buf
->data(), kSize
, false);
1752 // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
1753 EXPECT_EQ(kSize
, WriteSparseData(entry
, 0x20F0000, buf
.get(), kSize
));
1754 EXPECT_EQ(kSize
, WriteSparseData(entry
, 0x20F4400, buf
.get(), kSize
));
1756 // We stop at the first empty block.
1758 net::TestCompletionCallback cb
;
1759 int rv
= entry
->GetAvailableRange(
1760 0x20F0000, kSize
* 2, &start
, cb
.callback());
1761 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1762 EXPECT_EQ(0x20F0000, start
);
1765 rv
= entry
->GetAvailableRange(0, kSize
, &start
, cb
.callback());
1766 EXPECT_EQ(0, cb
.GetResult(rv
));
1767 rv
= entry
->GetAvailableRange(
1768 0x20F0000 - kSize
, kSize
, &start
, cb
.callback());
1769 EXPECT_EQ(0, cb
.GetResult(rv
));
1770 rv
= entry
->GetAvailableRange(0, 0x2100000, &start
, cb
.callback());
1771 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1772 EXPECT_EQ(0x20F0000, start
);
1774 // We should be able to Read based on the results of GetAvailableRange.
1776 rv
= entry
->GetAvailableRange(0x2100000, kSize
, &start
, cb
.callback());
1777 EXPECT_EQ(0, cb
.GetResult(rv
));
1778 rv
= entry
->ReadSparseData(start
, buf
.get(), kSize
, cb
.callback());
1779 EXPECT_EQ(0, cb
.GetResult(rv
));
1782 rv
= entry
->GetAvailableRange(0x20F2000, kSize
, &start
, cb
.callback());
1783 EXPECT_EQ(0x2000, cb
.GetResult(rv
));
1784 EXPECT_EQ(0x20F2000, start
);
1785 EXPECT_EQ(0x2000, ReadSparseData(entry
, start
, buf
.get(), kSize
));
1787 // Make sure that we respect the |len| argument.
1789 rv
= entry
->GetAvailableRange(
1790 0x20F0001 - kSize
, kSize
, &start
, cb
.callback());
1791 EXPECT_EQ(1, cb
.GetResult(rv
));
1792 EXPECT_EQ(0x20F0000, start
);
1797 TEST_F(DiskCacheEntryTest
, GetAvailableRange
) {
1799 GetAvailableRange();
1802 TEST_F(DiskCacheEntryTest
, MemoryOnlyGetAvailableRange
) {
1803 SetMemoryOnlyMode();
1805 GetAvailableRange();
1808 // Tests that non-sequential writes that are not aligned with the minimum sparse
1809 // data granularity (1024 bytes) do in fact result in dropped data.
1810 TEST_F(DiskCacheEntryTest
, SparseWriteDropped
) {
1812 std::string
key("the first key");
1813 disk_cache::Entry
* entry
;
1814 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1816 const int kSize
= 180;
1817 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
1818 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
1819 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
1821 // Do small writes (180 bytes) that get increasingly close to a 1024-byte
1822 // boundary. All data should be dropped until a boundary is crossed, at which
1823 // point the data after the boundary is saved (at least for a while).
1824 int offset
= 1024 - 500;
1826 net::TestCompletionCallback cb
;
1828 for (int i
= 0; i
< 5; i
++) {
1829 // Check result of last GetAvailableRange.
1832 rv
= entry
->WriteSparseData(offset
, buf_1
.get(), kSize
, cb
.callback());
1833 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1835 rv
= entry
->GetAvailableRange(offset
- 100, kSize
, &start
, cb
.callback());
1836 EXPECT_EQ(0, cb
.GetResult(rv
));
1838 rv
= entry
->GetAvailableRange(offset
, kSize
, &start
, cb
.callback());
1839 rv
= cb
.GetResult(rv
);
1841 rv
= entry
->ReadSparseData(offset
, buf_2
.get(), kSize
, cb
.callback());
1842 EXPECT_EQ(0, cb
.GetResult(rv
));
1845 offset
+= 1024 * i
+ 100;
1848 // The last write started 100 bytes below a bundary, so there should be 80
1849 // bytes after the boundary.
1851 EXPECT_EQ(1024 * 7, start
);
1852 rv
= entry
->ReadSparseData(start
, buf_2
.get(), kSize
, cb
.callback());
1853 EXPECT_EQ(80, cb
.GetResult(rv
));
1854 EXPECT_EQ(0, memcmp(buf_1
.get()->data() + 100, buf_2
.get()->data(), 80));
1856 // And even that part is dropped when another write changes the offset.
1858 rv
= entry
->WriteSparseData(0, buf_1
.get(), kSize
, cb
.callback());
1859 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1861 rv
= entry
->GetAvailableRange(offset
, kSize
, &start
, cb
.callback());
1862 EXPECT_EQ(0, cb
.GetResult(rv
));
1866 // Tests that small sequential writes are not dropped.
1867 TEST_F(DiskCacheEntryTest
, SparseSquentialWriteNotDropped
) {
1869 std::string
key("the first key");
1870 disk_cache::Entry
* entry
;
1871 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1873 const int kSize
= 180;
1874 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
1875 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
1876 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
1878 // Any starting offset is fine as long as it is 1024-bytes aligned.
1880 net::TestCompletionCallback cb
;
1882 int64 offset
= 1024 * 11;
1883 for (; offset
< 20000; offset
+= kSize
) {
1884 rv
= entry
->WriteSparseData(offset
, buf_1
.get(), kSize
, cb
.callback());
1885 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1887 rv
= entry
->GetAvailableRange(offset
, kSize
, &start
, cb
.callback());
1888 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1889 EXPECT_EQ(offset
, start
);
1891 rv
= entry
->ReadSparseData(offset
, buf_2
.get(), kSize
, cb
.callback());
1892 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1893 EXPECT_EQ(0, memcmp(buf_1
.get()->data(), buf_2
.get()->data(), kSize
));
1897 FlushQueueForTest();
1899 // Verify again the last write made.
1900 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1902 rv
= entry
->GetAvailableRange(offset
, kSize
, &start
, cb
.callback());
1903 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1904 EXPECT_EQ(offset
, start
);
1906 rv
= entry
->ReadSparseData(offset
, buf_2
.get(), kSize
, cb
.callback());
1907 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1908 EXPECT_EQ(0, memcmp(buf_1
.get()->data(), buf_2
.get()->data(), kSize
));
1913 void DiskCacheEntryTest::CouldBeSparse() {
1914 std::string
key("the first key");
1915 disk_cache::Entry
* entry
;
1916 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1918 const int kSize
= 16 * 1024;
1919 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
1920 CacheTestFillBuffer(buf
->data(), kSize
, false);
1922 // Write at offset 0x20F0000 (33 MB - 64 KB).
1923 EXPECT_EQ(kSize
, WriteSparseData(entry
, 0x20F0000, buf
.get(), kSize
));
1925 EXPECT_TRUE(entry
->CouldBeSparse());
1928 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1929 EXPECT_TRUE(entry
->CouldBeSparse());
1932 // Now verify a regular entry.
1933 key
.assign("another key");
1934 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1935 EXPECT_FALSE(entry
->CouldBeSparse());
1937 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buf
.get(), kSize
, false));
1938 EXPECT_EQ(kSize
, WriteData(entry
, 1, 0, buf
.get(), kSize
, false));
1939 EXPECT_EQ(kSize
, WriteData(entry
, 2, 0, buf
.get(), kSize
, false));
1941 EXPECT_FALSE(entry
->CouldBeSparse());
1944 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1945 EXPECT_FALSE(entry
->CouldBeSparse());
1949 TEST_F(DiskCacheEntryTest
, CouldBeSparse
) {
1954 TEST_F(DiskCacheEntryTest
, MemoryCouldBeSparse
) {
1955 SetMemoryOnlyMode();
1960 TEST_F(DiskCacheEntryTest
, MemoryOnlyMisalignedSparseIO
) {
1961 SetMemoryOnlyMode();
1964 const int kSize
= 8192;
1965 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
1966 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
1967 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
1969 std::string
key("the first key");
1970 disk_cache::Entry
* entry
;
1971 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1973 // This loop writes back to back starting from offset 0 and 9000.
1974 for (int i
= 0; i
< kSize
; i
+= 1024) {
1975 scoped_refptr
<net::WrappedIOBuffer
> buf_3(
1976 new net::WrappedIOBuffer(buf_1
->data() + i
));
1977 VerifySparseIO(entry
, i
, buf_3
.get(), 1024, buf_2
.get());
1978 VerifySparseIO(entry
, 9000 + i
, buf_3
.get(), 1024, buf_2
.get());
1981 // Make sure we have data written.
1982 VerifyContentSparseIO(entry
, 0, buf_1
->data(), kSize
);
1983 VerifyContentSparseIO(entry
, 9000, buf_1
->data(), kSize
);
1985 // This tests a large write that spans 3 entries from a misaligned offset.
1986 VerifySparseIO(entry
, 20481, buf_1
.get(), 8192, buf_2
.get());
1991 TEST_F(DiskCacheEntryTest
, MemoryOnlyMisalignedGetAvailableRange
) {
1992 SetMemoryOnlyMode();
1995 const int kSize
= 8192;
1996 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
1997 CacheTestFillBuffer(buf
->data(), kSize
, false);
1999 disk_cache::Entry
* entry
;
2000 std::string
key("the first key");
2001 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2003 // Writes in the middle of an entry.
2006 entry
->WriteSparseData(0, buf
.get(), 1024, net::CompletionCallback()));
2009 entry
->WriteSparseData(5120, buf
.get(), 1024, net::CompletionCallback()));
2011 entry
->WriteSparseData(
2012 10000, buf
.get(), 1024, net::CompletionCallback()));
2014 // Writes in the middle of an entry and spans 2 child entries.
2016 entry
->WriteSparseData(
2017 50000, buf
.get(), 8192, net::CompletionCallback()));
2020 net::TestCompletionCallback cb
;
2021 // Test that we stop at a discontinuous child at the second block.
2022 int rv
= entry
->GetAvailableRange(0, 10000, &start
, cb
.callback());
2023 EXPECT_EQ(1024, cb
.GetResult(rv
));
2024 EXPECT_EQ(0, start
);
2026 // Test that number of bytes is reported correctly when we start from the
2027 // middle of a filled region.
2028 rv
= entry
->GetAvailableRange(512, 10000, &start
, cb
.callback());
2029 EXPECT_EQ(512, cb
.GetResult(rv
));
2030 EXPECT_EQ(512, start
);
2032 // Test that we found bytes in the child of next block.
2033 rv
= entry
->GetAvailableRange(1024, 10000, &start
, cb
.callback());
2034 EXPECT_EQ(1024, cb
.GetResult(rv
));
2035 EXPECT_EQ(5120, start
);
2037 // Test that the desired length is respected. It starts within a filled
2039 rv
= entry
->GetAvailableRange(5500, 512, &start
, cb
.callback());
2040 EXPECT_EQ(512, cb
.GetResult(rv
));
2041 EXPECT_EQ(5500, start
);
2043 // Test that the desired length is respected. It starts before a filled
2045 rv
= entry
->GetAvailableRange(5000, 620, &start
, cb
.callback());
2046 EXPECT_EQ(500, cb
.GetResult(rv
));
2047 EXPECT_EQ(5120, start
);
2049 // Test that multiple blocks are scanned.
2050 rv
= entry
->GetAvailableRange(40000, 20000, &start
, cb
.callback());
2051 EXPECT_EQ(8192, cb
.GetResult(rv
));
2052 EXPECT_EQ(50000, start
);
2057 void DiskCacheEntryTest::UpdateSparseEntry() {
2058 std::string
key("the first key");
2059 disk_cache::Entry
* entry1
;
2060 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry1
));
2062 const int kSize
= 2048;
2063 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
2064 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
2065 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
2067 // Write at offset 0.
2068 VerifySparseIO(entry1
, 0, buf_1
.get(), kSize
, buf_2
.get());
2071 // Write at offset 2048.
2072 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry1
));
2073 VerifySparseIO(entry1
, 2048, buf_1
.get(), kSize
, buf_2
.get());
2075 disk_cache::Entry
* entry2
;
2076 ASSERT_EQ(net::OK
, CreateEntry("the second key", &entry2
));
2080 FlushQueueForTest();
2081 if (memory_only_
|| simple_cache_mode_
)
2082 EXPECT_EQ(2, cache_
->GetEntryCount());
2084 EXPECT_EQ(3, cache_
->GetEntryCount());
2087 TEST_F(DiskCacheEntryTest
, UpdateSparseEntry
) {
2088 SetCacheType(net::MEDIA_CACHE
);
2090 UpdateSparseEntry();
2093 TEST_F(DiskCacheEntryTest
, MemoryOnlyUpdateSparseEntry
) {
2094 SetMemoryOnlyMode();
2095 SetCacheType(net::MEDIA_CACHE
);
2097 UpdateSparseEntry();
2100 void DiskCacheEntryTest::DoomSparseEntry() {
2101 std::string
key1("the first key");
2102 std::string
key2("the second key");
2103 disk_cache::Entry
*entry1
, *entry2
;
2104 ASSERT_EQ(net::OK
, CreateEntry(key1
, &entry1
));
2105 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry2
));
2107 const int kSize
= 4 * 1024;
2108 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
2109 CacheTestFillBuffer(buf
->data(), kSize
, false);
2111 int64 offset
= 1024;
2112 // Write to a bunch of ranges.
2113 for (int i
= 0; i
< 12; i
++) {
2114 EXPECT_EQ(kSize
, WriteSparseData(entry1
, offset
, buf
.get(), kSize
));
2115 // Keep the second map under the default size.
2117 EXPECT_EQ(kSize
, WriteSparseData(entry2
, offset
, buf
.get(), kSize
));
2122 if (memory_only_
|| simple_cache_mode_
)
2123 EXPECT_EQ(2, cache_
->GetEntryCount());
2125 EXPECT_EQ(15, cache_
->GetEntryCount());
2127 // Doom the first entry while it's still open.
2132 // Doom the second entry after it's fully saved.
2133 EXPECT_EQ(net::OK
, DoomEntry(key2
));
2135 // Make sure we do all needed work. This may fail for entry2 if between Close
2136 // and DoomEntry the system decides to remove all traces of the file from the
2137 // system cache so we don't see that there is pending IO.
2138 base::MessageLoop::current()->RunUntilIdle();
2141 EXPECT_EQ(0, cache_
->GetEntryCount());
2143 if (5 == cache_
->GetEntryCount()) {
2144 // Most likely we are waiting for the result of reading the sparse info
2145 // (it's always async on Posix so it is easy to miss). Unfortunately we
2146 // don't have any signal to watch for so we can only wait.
2147 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
2148 base::MessageLoop::current()->RunUntilIdle();
2150 EXPECT_EQ(0, cache_
->GetEntryCount());
2154 TEST_F(DiskCacheEntryTest
, DoomSparseEntry
) {
2160 TEST_F(DiskCacheEntryTest
, MemoryOnlyDoomSparseEntry
) {
2161 SetMemoryOnlyMode();
2166 // A CompletionCallback wrapper that deletes the cache from within the callback.
2167 // The way a CompletionCallback works means that all tasks (even new ones)
2168 // are executed by the message loop before returning to the caller so the only
2169 // way to simulate a race is to execute what we want on the callback.
2170 class SparseTestCompletionCallback
: public net::TestCompletionCallback
{
2172 explicit SparseTestCompletionCallback(scoped_ptr
<disk_cache::Backend
> cache
)
2173 : cache_(cache
.Pass()) {
2177 void SetResult(int result
) override
{
2179 TestCompletionCallback::SetResult(result
);
2182 scoped_ptr
<disk_cache::Backend
> cache_
;
2183 DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback
);
2186 // Tests that we don't crash when the backend is deleted while we are working
2187 // deleting the sub-entries of a sparse entry.
2188 TEST_F(DiskCacheEntryTest
, DoomSparseEntry2
) {
2191 std::string
key("the key");
2192 disk_cache::Entry
* entry
;
2193 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2195 const int kSize
= 4 * 1024;
2196 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
2197 CacheTestFillBuffer(buf
->data(), kSize
, false);
2199 int64 offset
= 1024;
2200 // Write to a bunch of ranges.
2201 for (int i
= 0; i
< 12; i
++) {
2203 entry
->WriteSparseData(
2204 offset
, buf
.get(), kSize
, net::CompletionCallback()));
2207 EXPECT_EQ(9, cache_
->GetEntryCount());
2210 disk_cache::Backend
* cache
= cache_
.get();
2211 SparseTestCompletionCallback
cb(cache_
.Pass());
2212 int rv
= cache
->DoomEntry(key
, cb
.callback());
2213 EXPECT_EQ(net::ERR_IO_PENDING
, rv
);
2214 EXPECT_EQ(net::OK
, cb
.WaitForResult());
2217 void DiskCacheEntryTest::PartialSparseEntry() {
2218 std::string
key("the first key");
2219 disk_cache::Entry
* entry
;
2220 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2222 // We should be able to deal with IO that is not aligned to the block size
2223 // of a sparse entry, at least to write a big range without leaving holes.
2224 const int kSize
= 4 * 1024;
2225 const int kSmallSize
= 128;
2226 scoped_refptr
<net::IOBuffer
> buf1(new net::IOBuffer(kSize
));
2227 CacheTestFillBuffer(buf1
->data(), kSize
, false);
2229 // The first write is just to extend the entry. The third write occupies
2230 // a 1KB block partially, it may not be written internally depending on the
2232 EXPECT_EQ(kSize
, WriteSparseData(entry
, 20000, buf1
.get(), kSize
));
2233 EXPECT_EQ(kSize
, WriteSparseData(entry
, 500, buf1
.get(), kSize
));
2234 EXPECT_EQ(kSmallSize
,
2235 WriteSparseData(entry
, 1080321, buf1
.get(), kSmallSize
));
2237 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2239 scoped_refptr
<net::IOBuffer
> buf2(new net::IOBuffer(kSize
));
2240 memset(buf2
->data(), 0, kSize
);
2241 EXPECT_EQ(0, ReadSparseData(entry
, 8000, buf2
.get(), kSize
));
2243 EXPECT_EQ(500, ReadSparseData(entry
, kSize
, buf2
.get(), kSize
));
2244 EXPECT_EQ(0, memcmp(buf2
->data(), buf1
->data() + kSize
- 500, 500));
2245 EXPECT_EQ(0, ReadSparseData(entry
, 0, buf2
.get(), kSize
));
2247 // This read should not change anything.
2248 if (memory_only_
|| simple_cache_mode_
)
2249 EXPECT_EQ(96, ReadSparseData(entry
, 24000, buf2
.get(), kSize
));
2251 EXPECT_EQ(0, ReadSparseData(entry
, 24000, buf2
.get(), kSize
));
2253 EXPECT_EQ(500, ReadSparseData(entry
, kSize
, buf2
.get(), kSize
));
2254 EXPECT_EQ(0, ReadSparseData(entry
, 99, buf2
.get(), kSize
));
2258 net::TestCompletionCallback cb
;
2259 if (memory_only_
|| simple_cache_mode_
) {
2260 rv
= entry
->GetAvailableRange(0, 600, &start
, cb
.callback());
2261 EXPECT_EQ(100, cb
.GetResult(rv
));
2262 EXPECT_EQ(500, start
);
2264 rv
= entry
->GetAvailableRange(0, 2048, &start
, cb
.callback());
2265 EXPECT_EQ(1024, cb
.GetResult(rv
));
2266 EXPECT_EQ(1024, start
);
2268 rv
= entry
->GetAvailableRange(kSize
, kSize
, &start
, cb
.callback());
2269 EXPECT_EQ(500, cb
.GetResult(rv
));
2270 EXPECT_EQ(kSize
, start
);
2271 rv
= entry
->GetAvailableRange(20 * 1024, 10000, &start
, cb
.callback());
2272 if (memory_only_
|| simple_cache_mode_
)
2273 EXPECT_EQ(3616, cb
.GetResult(rv
));
2275 EXPECT_EQ(3072, cb
.GetResult(rv
));
2277 EXPECT_EQ(20 * 1024, start
);
2279 // 1. Query before a filled 1KB block.
2280 // 2. Query within a filled 1KB block.
2281 // 3. Query beyond a filled 1KB block.
2282 if (memory_only_
|| simple_cache_mode_
) {
2283 rv
= entry
->GetAvailableRange(19400, kSize
, &start
, cb
.callback());
2284 EXPECT_EQ(3496, cb
.GetResult(rv
));
2285 EXPECT_EQ(20000, start
);
2287 rv
= entry
->GetAvailableRange(19400, kSize
, &start
, cb
.callback());
2288 EXPECT_EQ(3016, cb
.GetResult(rv
));
2289 EXPECT_EQ(20480, start
);
2291 rv
= entry
->GetAvailableRange(3073, kSize
, &start
, cb
.callback());
2292 EXPECT_EQ(1523, cb
.GetResult(rv
));
2293 EXPECT_EQ(3073, start
);
2294 rv
= entry
->GetAvailableRange(4600, kSize
, &start
, cb
.callback());
2295 EXPECT_EQ(0, cb
.GetResult(rv
));
2296 EXPECT_EQ(4600, start
);
2298 // Now make another write and verify that there is no hole in between.
2299 EXPECT_EQ(kSize
, WriteSparseData(entry
, 500 + kSize
, buf1
.get(), kSize
));
2300 rv
= entry
->GetAvailableRange(1024, 10000, &start
, cb
.callback());
2301 EXPECT_EQ(7 * 1024 + 500, cb
.GetResult(rv
));
2302 EXPECT_EQ(1024, start
);
2303 EXPECT_EQ(kSize
, ReadSparseData(entry
, kSize
, buf2
.get(), kSize
));
2304 EXPECT_EQ(0, memcmp(buf2
->data(), buf1
->data() + kSize
- 500, 500));
2305 EXPECT_EQ(0, memcmp(buf2
->data() + 500, buf1
->data(), kSize
- 500));
2310 TEST_F(DiskCacheEntryTest
, PartialSparseEntry
) {
2312 PartialSparseEntry();
2315 TEST_F(DiskCacheEntryTest
, MemoryPartialSparseEntry
) {
2316 SetMemoryOnlyMode();
2318 PartialSparseEntry();
2321 // Tests that corrupt sparse children are removed automatically.
2322 TEST_F(DiskCacheEntryTest
, CleanupSparseEntry
) {
2324 std::string
key("the first key");
2325 disk_cache::Entry
* entry
;
2326 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2328 const int kSize
= 4 * 1024;
2329 scoped_refptr
<net::IOBuffer
> buf1(new net::IOBuffer(kSize
));
2330 CacheTestFillBuffer(buf1
->data(), kSize
, false);
2332 const int k1Meg
= 1024 * 1024;
2333 EXPECT_EQ(kSize
, WriteSparseData(entry
, 8192, buf1
.get(), kSize
));
2334 EXPECT_EQ(kSize
, WriteSparseData(entry
, k1Meg
+ 8192, buf1
.get(), kSize
));
2335 EXPECT_EQ(kSize
, WriteSparseData(entry
, 2 * k1Meg
+ 8192, buf1
.get(), kSize
));
2337 EXPECT_EQ(4, cache_
->GetEntryCount());
2339 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2341 std::string child_key
[2];
2342 while (iter
->OpenNextEntry(&entry
) == net::OK
) {
2343 ASSERT_TRUE(entry
!= NULL
);
2344 // Writing to an entry will alter the LRU list and invalidate the iterator.
2345 if (entry
->GetKey() != key
&& count
< 2)
2346 child_key
[count
++] = entry
->GetKey();
2349 for (int i
= 0; i
< 2; i
++) {
2350 ASSERT_EQ(net::OK
, OpenEntry(child_key
[i
], &entry
));
2351 // Overwrite the header's magic and signature.
2352 EXPECT_EQ(12, WriteData(entry
, 2, 0, buf1
.get(), 12, false));
2356 EXPECT_EQ(4, cache_
->GetEntryCount());
2357 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2359 // Two children should be gone. One while reading and one while writing.
2360 EXPECT_EQ(0, ReadSparseData(entry
, 2 * k1Meg
+ 8192, buf1
.get(), kSize
));
2361 EXPECT_EQ(kSize
, WriteSparseData(entry
, k1Meg
+ 16384, buf1
.get(), kSize
));
2362 EXPECT_EQ(0, ReadSparseData(entry
, k1Meg
+ 8192, buf1
.get(), kSize
));
2364 // We never touched this one.
2365 EXPECT_EQ(kSize
, ReadSparseData(entry
, 8192, buf1
.get(), kSize
));
2368 // We re-created one of the corrupt children.
2369 EXPECT_EQ(3, cache_
->GetEntryCount());
2372 TEST_F(DiskCacheEntryTest
, CancelSparseIO
) {
2375 std::string
key("the first key");
2376 disk_cache::Entry
* entry
;
2377 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2379 const int kSize
= 40 * 1024;
2380 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
2381 CacheTestFillBuffer(buf
->data(), kSize
, false);
2383 // This will open and write two "real" entries.
2384 net::TestCompletionCallback cb1
, cb2
, cb3
, cb4
, cb5
;
2385 int rv
= entry
->WriteSparseData(
2386 1024 * 1024 - 4096, buf
.get(), kSize
, cb1
.callback());
2387 EXPECT_EQ(net::ERR_IO_PENDING
, rv
);
2390 rv
= entry
->GetAvailableRange(offset
, kSize
, &offset
, cb5
.callback());
2391 rv
= cb5
.GetResult(rv
);
2392 if (!cb1
.have_result()) {
2393 // We may or may not have finished writing to the entry. If we have not,
2394 // we cannot start another operation at this time.
2395 EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED
, rv
);
2398 // We cancel the pending operation, and register multiple notifications.
2399 entry
->CancelSparseIO();
2400 EXPECT_EQ(net::ERR_IO_PENDING
, entry
->ReadyForSparseIO(cb2
.callback()));
2401 EXPECT_EQ(net::ERR_IO_PENDING
, entry
->ReadyForSparseIO(cb3
.callback()));
2402 entry
->CancelSparseIO(); // Should be a no op at this point.
2403 EXPECT_EQ(net::ERR_IO_PENDING
, entry
->ReadyForSparseIO(cb4
.callback()));
2405 if (!cb1
.have_result()) {
2406 EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED
,
2407 entry
->ReadSparseData(
2408 offset
, buf
.get(), kSize
, net::CompletionCallback()));
2409 EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED
,
2410 entry
->WriteSparseData(
2411 offset
, buf
.get(), kSize
, net::CompletionCallback()));
2414 // Now see if we receive all notifications. Note that we should not be able
2415 // to write everything (unless the timing of the system is really weird).
2416 rv
= cb1
.WaitForResult();
2417 EXPECT_TRUE(rv
== 4096 || rv
== kSize
);
2418 EXPECT_EQ(net::OK
, cb2
.WaitForResult());
2419 EXPECT_EQ(net::OK
, cb3
.WaitForResult());
2420 EXPECT_EQ(net::OK
, cb4
.WaitForResult());
2422 rv
= entry
->GetAvailableRange(offset
, kSize
, &offset
, cb5
.callback());
2423 EXPECT_EQ(0, cb5
.GetResult(rv
));
2427 // Tests that we perform sanity checks on an entry's key. Note that there are
2428 // other tests that exercise sanity checks by using saved corrupt files.
2429 TEST_F(DiskCacheEntryTest
, KeySanityCheck
) {
2432 std::string
key("the first key");
2433 disk_cache::Entry
* entry
;
2434 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2436 disk_cache::EntryImpl
* entry_impl
=
2437 static_cast<disk_cache::EntryImpl
*>(entry
);
2438 disk_cache::EntryStore
* store
= entry_impl
->entry()->Data();
2440 // We have reserved space for a short key (one block), let's say that the key
2441 // takes more than one block, and remove the NULLs after the actual key.
2442 store
->key_len
= 800;
2443 memset(store
->key
+ key
.size(), 'k', sizeof(store
->key
) - key
.size());
2444 entry_impl
->entry()->set_modified();
2447 // We have a corrupt entry. Now reload it. We should NOT read beyond the
2448 // allocated buffer here.
2449 ASSERT_NE(net::OK
, OpenEntry(key
, &entry
));
2450 DisableIntegrityCheck();
2453 TEST_F(DiskCacheEntryTest
, SimpleCacheInternalAsyncIO
) {
2454 SetSimpleCacheMode();
2459 TEST_F(DiskCacheEntryTest
, SimpleCacheExternalAsyncIO
) {
2460 SetSimpleCacheMode();
2465 TEST_F(DiskCacheEntryTest
, SimpleCacheReleaseBuffer
) {
2466 SetSimpleCacheMode();
2468 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2469 EXPECT_EQ(net::OK
, DoomAllEntries());
2474 TEST_F(DiskCacheEntryTest
, SimpleCacheStreamAccess
) {
2475 SetSimpleCacheMode();
2480 TEST_F(DiskCacheEntryTest
, SimpleCacheGetKey
) {
2481 SetSimpleCacheMode();
2486 TEST_F(DiskCacheEntryTest
, SimpleCacheGetTimes
) {
2487 SetSimpleCacheMode();
2489 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2490 EXPECT_EQ(net::OK
, DoomAllEntries());
2495 TEST_F(DiskCacheEntryTest
, SimpleCacheGrowData
) {
2496 SetSimpleCacheMode();
2498 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2499 EXPECT_EQ(net::OK
, DoomAllEntries());
2504 TEST_F(DiskCacheEntryTest
, SimpleCacheTruncateData
) {
2505 SetSimpleCacheMode();
2507 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2508 EXPECT_EQ(net::OK
, DoomAllEntries());
2513 TEST_F(DiskCacheEntryTest
, SimpleCacheZeroLengthIO
) {
2514 SetSimpleCacheMode();
2516 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2517 EXPECT_EQ(net::OK
, DoomAllEntries());
2522 TEST_F(DiskCacheEntryTest
, SimpleCacheSizeAtCreate
) {
2523 SetSimpleCacheMode();
2528 TEST_F(DiskCacheEntryTest
, SimpleCacheReuseExternalEntry
) {
2529 SetSimpleCacheMode();
2530 SetMaxSize(200 * 1024);
2532 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2533 EXPECT_EQ(net::OK
, DoomAllEntries());
2534 ReuseEntry(20 * 1024, i
);
2538 TEST_F(DiskCacheEntryTest
, SimpleCacheReuseInternalEntry
) {
2539 SetSimpleCacheMode();
2540 SetMaxSize(100 * 1024);
2542 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2543 EXPECT_EQ(net::OK
, DoomAllEntries());
2544 ReuseEntry(10 * 1024, i
);
2548 TEST_F(DiskCacheEntryTest
, SimpleCacheSizeChanges
) {
2549 SetSimpleCacheMode();
2551 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2552 EXPECT_EQ(net::OK
, DoomAllEntries());
2557 TEST_F(DiskCacheEntryTest
, SimpleCacheInvalidData
) {
2558 SetSimpleCacheMode();
2560 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2561 EXPECT_EQ(net::OK
, DoomAllEntries());
2566 TEST_F(DiskCacheEntryTest
, SimpleCacheReadWriteDestroyBuffer
) {
2567 // Proving that the test works well with optimistic operations enabled is
2568 // subtle, instead run only in APP_CACHE mode to disable optimistic
2569 // operations. Stream 0 always uses optimistic operations, so the test is not
2571 SetCacheType(net::APP_CACHE
);
2572 SetSimpleCacheMode();
2574 for (int i
= 1; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
2575 EXPECT_EQ(net::OK
, DoomAllEntries());
2576 ReadWriteDestroyBuffer(i
);
2580 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomEntry
) {
2581 SetSimpleCacheMode();
2586 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomEntryNextToOpenEntry
) {
2587 SetSimpleCacheMode();
2589 DoomEntryNextToOpenEntry();
2592 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomedEntry
) {
2593 SetSimpleCacheMode();
2595 // Stream 2 is excluded because the implementation does not support writing to
2596 // it on a doomed entry, if it was previously lazily omitted.
2597 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
- 1; ++i
) {
2598 EXPECT_EQ(net::OK
, DoomAllEntries());
2603 // Creates an entry with corrupted last byte in stream 0.
2604 // Requires SimpleCacheMode.
2605 bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const std::string
& key
,
2607 disk_cache::Entry
* entry
= NULL
;
2609 if (CreateEntry(key
, &entry
) != net::OK
|| !entry
) {
2610 LOG(ERROR
) << "Could not create entry";
2614 const char data
[] = "this is very good data";
2615 const int kDataSize
= arraysize(data
);
2616 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kDataSize
));
2617 base::strlcpy(buffer
->data(), data
, kDataSize
);
2619 EXPECT_EQ(kDataSize
, WriteData(entry
, 1, 0, buffer
.get(), kDataSize
, false));
2623 // Corrupt the last byte of the data.
2624 base::FilePath entry_file0_path
= cache_path_
.AppendASCII(
2625 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, 0));
2626 base::File
entry_file0(entry_file0_path
,
2627 base::File::FLAG_WRITE
| base::File::FLAG_OPEN
);
2628 if (!entry_file0
.IsValid())
2632 sizeof(disk_cache::SimpleFileHeader
) + key
.size() + kDataSize
- 2;
2633 EXPECT_EQ(1, entry_file0
.Write(file_offset
, "X", 1));
2634 *data_size
= kDataSize
;
2638 // Tests that the simple cache can detect entries that have bad data.
2639 TEST_F(DiskCacheEntryTest
, SimpleCacheBadChecksum
) {
2640 SetSimpleCacheMode();
2643 const char key
[] = "the first key";
2645 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key
, &size_unused
));
2647 disk_cache::Entry
* entry
= NULL
;
2650 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2651 ScopedEntryPtr
entry_closer(entry
);
2653 const int kReadBufferSize
= 200;
2654 EXPECT_GE(kReadBufferSize
, entry
->GetDataSize(1));
2655 scoped_refptr
<net::IOBuffer
> read_buffer(new net::IOBuffer(kReadBufferSize
));
2656 EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH
,
2657 ReadData(entry
, 1, 0, read_buffer
.get(), kReadBufferSize
));
2660 // Tests that an entry that has had an IO error occur can still be Doomed().
2661 TEST_F(DiskCacheEntryTest
, SimpleCacheErrorThenDoom
) {
2662 SetSimpleCacheMode();
2665 const char key
[] = "the first key";
2667 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key
, &size_unused
));
2669 disk_cache::Entry
* entry
= NULL
;
2671 // Open the entry, forcing an IO error.
2672 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2673 ScopedEntryPtr
entry_closer(entry
);
2675 const int kReadBufferSize
= 200;
2676 EXPECT_GE(kReadBufferSize
, entry
->GetDataSize(1));
2677 scoped_refptr
<net::IOBuffer
> read_buffer(new net::IOBuffer(kReadBufferSize
));
2678 EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH
,
2679 ReadData(entry
, 1, 0, read_buffer
.get(), kReadBufferSize
));
2681 entry
->Doom(); // Should not crash.
2684 bool TruncatePath(const base::FilePath
& file_path
, int64 length
) {
2685 base::File
file(file_path
, base::File::FLAG_WRITE
| base::File::FLAG_OPEN
);
2686 if (!file
.IsValid())
2688 return file
.SetLength(length
);
2691 TEST_F(DiskCacheEntryTest
, SimpleCacheNoEOF
) {
2692 SetSimpleCacheMode();
2695 const char key
[] = "the first key";
2697 disk_cache::Entry
* entry
= NULL
;
2698 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2699 disk_cache::Entry
* null
= NULL
;
2700 EXPECT_NE(null
, entry
);
2704 // Force the entry to flush to disk, so subsequent platform file operations
2706 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2710 // Truncate the file such that the length isn't sufficient to have an EOF
2712 int kTruncationBytes
= -implicit_cast
<int>(sizeof(disk_cache::SimpleFileEOF
));
2713 const base::FilePath entry_path
= cache_path_
.AppendASCII(
2714 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, 0));
2715 const int64 invalid_size
=
2716 disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key
,
2718 EXPECT_TRUE(TruncatePath(entry_path
, invalid_size
));
2719 EXPECT_EQ(net::ERR_FAILED
, OpenEntry(key
, &entry
));
2720 DisableIntegrityCheck();
2723 TEST_F(DiskCacheEntryTest
, SimpleCacheNonOptimisticOperationsBasic
) {
2725 // Create, Write, Read, Close.
2726 SetCacheType(net::APP_CACHE
); // APP_CACHE doesn't use optimistic operations.
2727 SetSimpleCacheMode();
2729 disk_cache::Entry
* const null_entry
= NULL
;
2731 disk_cache::Entry
* entry
= NULL
;
2732 EXPECT_EQ(net::OK
, CreateEntry("my key", &entry
));
2733 ASSERT_NE(null_entry
, entry
);
2734 ScopedEntryPtr
entry_closer(entry
);
2736 const int kBufferSize
= 10;
2737 scoped_refptr
<net::IOBufferWithSize
> write_buffer(
2738 new net::IOBufferWithSize(kBufferSize
));
2739 CacheTestFillBuffer(write_buffer
->data(), write_buffer
->size(), false);
2741 write_buffer
->size(),
2742 WriteData(entry
, 1, 0, write_buffer
.get(), write_buffer
->size(), false));
2744 scoped_refptr
<net::IOBufferWithSize
> read_buffer(
2745 new net::IOBufferWithSize(kBufferSize
));
2746 EXPECT_EQ(read_buffer
->size(),
2747 ReadData(entry
, 1, 0, read_buffer
.get(), read_buffer
->size()));
2750 TEST_F(DiskCacheEntryTest
, SimpleCacheNonOptimisticOperationsDontBlock
) {
2752 // Create, Write, Close.
2753 SetCacheType(net::APP_CACHE
); // APP_CACHE doesn't use optimistic operations.
2754 SetSimpleCacheMode();
2756 disk_cache::Entry
* const null_entry
= NULL
;
2758 MessageLoopHelper helper
;
2759 CallbackTest
create_callback(&helper
, false);
2761 int expected_callback_runs
= 0;
2762 const int kBufferSize
= 10;
2763 scoped_refptr
<net::IOBufferWithSize
> write_buffer(
2764 new net::IOBufferWithSize(kBufferSize
));
2766 disk_cache::Entry
* entry
= NULL
;
2767 EXPECT_EQ(net::OK
, CreateEntry("my key", &entry
));
2768 ASSERT_NE(null_entry
, entry
);
2769 ScopedEntryPtr
entry_closer(entry
);
2771 CacheTestFillBuffer(write_buffer
->data(), write_buffer
->size(), false);
2772 CallbackTest
write_callback(&helper
, false);
2773 int ret
= entry
->WriteData(
2777 write_buffer
->size(),
2778 base::Bind(&CallbackTest::Run
, base::Unretained(&write_callback
)),
2780 ASSERT_EQ(net::ERR_IO_PENDING
, ret
);
2781 helper
.WaitUntilCacheIoFinished(++expected_callback_runs
);
2784 TEST_F(DiskCacheEntryTest
,
2785 SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting
) {
2787 // Create, Write, Read, Close.
2788 SetCacheType(net::APP_CACHE
); // APP_CACHE doesn't use optimistic operations.
2789 SetSimpleCacheMode();
2791 disk_cache::Entry
* const null_entry
= NULL
;
2792 MessageLoopHelper helper
;
2794 disk_cache::Entry
* entry
= NULL
;
2795 // Note that |entry| is only set once CreateEntry() completed which is why we
2796 // have to wait (i.e. use the helper CreateEntry() function).
2797 EXPECT_EQ(net::OK
, CreateEntry("my key", &entry
));
2798 ASSERT_NE(null_entry
, entry
);
2799 ScopedEntryPtr
entry_closer(entry
);
2801 const int kBufferSize
= 10;
2802 scoped_refptr
<net::IOBufferWithSize
> write_buffer(
2803 new net::IOBufferWithSize(kBufferSize
));
2804 CacheTestFillBuffer(write_buffer
->data(), write_buffer
->size(), false);
2805 CallbackTest
write_callback(&helper
, false);
2806 int ret
= entry
->WriteData(
2810 write_buffer
->size(),
2811 base::Bind(&CallbackTest::Run
, base::Unretained(&write_callback
)),
2813 EXPECT_EQ(net::ERR_IO_PENDING
, ret
);
2814 int expected_callback_runs
= 1;
2816 scoped_refptr
<net::IOBufferWithSize
> read_buffer(
2817 new net::IOBufferWithSize(kBufferSize
));
2818 CallbackTest
read_callback(&helper
, false);
2819 ret
= entry
->ReadData(
2823 read_buffer
->size(),
2824 base::Bind(&CallbackTest::Run
, base::Unretained(&read_callback
)));
2825 EXPECT_EQ(net::ERR_IO_PENDING
, ret
);
2826 ++expected_callback_runs
;
2828 helper
.WaitUntilCacheIoFinished(expected_callback_runs
);
2829 ASSERT_EQ(read_buffer
->size(), write_buffer
->size());
2832 memcmp(read_buffer
->data(), write_buffer
->data(), read_buffer
->size()));
2835 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic
) {
2837 // Create, Write, Read, Write, Read, Close.
2838 SetSimpleCacheMode();
2840 disk_cache::Entry
* null
= NULL
;
2841 const char key
[] = "the first key";
2843 MessageLoopHelper helper
;
2844 CallbackTest
callback1(&helper
, false);
2845 CallbackTest
callback2(&helper
, false);
2846 CallbackTest
callback3(&helper
, false);
2847 CallbackTest
callback4(&helper
, false);
2848 CallbackTest
callback5(&helper
, false);
2851 const int kSize1
= 10;
2852 const int kSize2
= 20;
2853 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
2854 scoped_refptr
<net::IOBuffer
> buffer1_read(new net::IOBuffer(kSize1
));
2855 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
2856 scoped_refptr
<net::IOBuffer
> buffer2_read(new net::IOBuffer(kSize2
));
2857 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
2858 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
2860 disk_cache::Entry
* entry
= NULL
;
2861 // Create is optimistic, must return OK.
2863 cache_
->CreateEntry(key
, &entry
,
2864 base::Bind(&CallbackTest::Run
,
2865 base::Unretained(&callback1
))));
2866 EXPECT_NE(null
, entry
);
2867 ScopedEntryPtr
entry_closer(entry
);
2869 // This write may or may not be optimistic (it depends if the previous
2870 // optimistic create already finished by the time we call the write here).
2871 int ret
= entry
->WriteData(
2876 base::Bind(&CallbackTest::Run
, base::Unretained(&callback2
)),
2878 EXPECT_TRUE(kSize1
== ret
|| net::ERR_IO_PENDING
== ret
);
2879 if (net::ERR_IO_PENDING
== ret
)
2882 // This Read must not be optimistic, since we don't support that yet.
2883 EXPECT_EQ(net::ERR_IO_PENDING
,
2889 base::Bind(&CallbackTest::Run
, base::Unretained(&callback3
))));
2891 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
2892 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer1_read
->data(), kSize1
));
2894 // At this point after waiting, the pending operations queue on the entry
2895 // should be empty, so the next Write operation must run as optimistic.
2902 base::Bind(&CallbackTest::Run
, base::Unretained(&callback4
)),
2905 // Lets do another read so we block until both the write and the read
2906 // operation finishes and we can then test for HasOneRef() below.
2907 EXPECT_EQ(net::ERR_IO_PENDING
,
2913 base::Bind(&CallbackTest::Run
, base::Unretained(&callback5
))));
2916 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
2917 EXPECT_EQ(0, memcmp(buffer2
->data(), buffer2_read
->data(), kSize2
));
2919 // Check that we are not leaking.
2920 EXPECT_NE(entry
, null
);
2922 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
2925 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic2
) {
2927 // Create, Open, Close, Close.
2928 SetSimpleCacheMode();
2930 disk_cache::Entry
* null
= NULL
;
2931 const char key
[] = "the first key";
2933 MessageLoopHelper helper
;
2934 CallbackTest
callback1(&helper
, false);
2935 CallbackTest
callback2(&helper
, false);
2937 disk_cache::Entry
* entry
= NULL
;
2939 cache_
->CreateEntry(key
, &entry
,
2940 base::Bind(&CallbackTest::Run
,
2941 base::Unretained(&callback1
))));
2942 EXPECT_NE(null
, entry
);
2943 ScopedEntryPtr
entry_closer(entry
);
2945 disk_cache::Entry
* entry2
= NULL
;
2946 ASSERT_EQ(net::ERR_IO_PENDING
,
2947 cache_
->OpenEntry(key
, &entry2
,
2948 base::Bind(&CallbackTest::Run
,
2949 base::Unretained(&callback2
))));
2950 ASSERT_TRUE(helper
.WaitUntilCacheIoFinished(1));
2952 EXPECT_NE(null
, entry2
);
2953 EXPECT_EQ(entry
, entry2
);
2955 // We have to call close twice, since we called create and open above.
2958 // Check that we are not leaking.
2960 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
2963 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic3
) {
2965 // Create, Close, Open, Close.
2966 SetSimpleCacheMode();
2968 disk_cache::Entry
* null
= NULL
;
2969 const char key
[] = "the first key";
2971 disk_cache::Entry
* entry
= NULL
;
2973 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
2974 EXPECT_NE(null
, entry
);
2977 net::TestCompletionCallback cb
;
2978 disk_cache::Entry
* entry2
= NULL
;
2979 ASSERT_EQ(net::ERR_IO_PENDING
,
2980 cache_
->OpenEntry(key
, &entry2
, cb
.callback()));
2981 ASSERT_EQ(net::OK
, cb
.GetResult(net::ERR_IO_PENDING
));
2982 ScopedEntryPtr
entry_closer(entry2
);
2984 EXPECT_NE(null
, entry2
);
2985 EXPECT_EQ(entry
, entry2
);
2987 // Check that we are not leaking.
2989 static_cast<disk_cache::SimpleEntryImpl
*>(entry2
)->HasOneRef());
2992 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic4
) {
2994 // Create, Close, Write, Open, Open, Close, Write, Read, Close.
2995 SetSimpleCacheMode();
2997 disk_cache::Entry
* null
= NULL
;
2998 const char key
[] = "the first key";
3000 net::TestCompletionCallback cb
;
3001 const int kSize1
= 10;
3002 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
3003 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
3004 disk_cache::Entry
* entry
= NULL
;
3007 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
3008 EXPECT_NE(null
, entry
);
3011 // Lets do a Write so we block until both the Close and the Write
3012 // operation finishes. Write must fail since we are writing in a closed entry.
3014 net::ERR_IO_PENDING
,
3015 entry
->WriteData(1, 0, buffer1
.get(), kSize1
, cb
.callback(), false));
3016 EXPECT_EQ(net::ERR_FAILED
, cb
.GetResult(net::ERR_IO_PENDING
));
3018 // Finish running the pending tasks so that we fully complete the close
3019 // operation and destroy the entry object.
3020 base::MessageLoop::current()->RunUntilIdle();
3022 // At this point the |entry| must have been destroyed, and called
3023 // RemoveSelfFromBackend().
3024 disk_cache::Entry
* entry2
= NULL
;
3025 ASSERT_EQ(net::ERR_IO_PENDING
,
3026 cache_
->OpenEntry(key
, &entry2
, cb
.callback()));
3027 ASSERT_EQ(net::OK
, cb
.GetResult(net::ERR_IO_PENDING
));
3028 EXPECT_NE(null
, entry2
);
3030 disk_cache::Entry
* entry3
= NULL
;
3031 ASSERT_EQ(net::ERR_IO_PENDING
,
3032 cache_
->OpenEntry(key
, &entry3
, cb
.callback()));
3033 ASSERT_EQ(net::OK
, cb
.GetResult(net::ERR_IO_PENDING
));
3034 EXPECT_NE(null
, entry3
);
3035 EXPECT_EQ(entry2
, entry3
);
3038 // The previous Close doesn't actually closes the entry since we opened it
3039 // twice, so the next Write operation must succeed and it must be able to
3040 // perform it optimistically, since there is no operation running on this
3044 1, 0, buffer1
.get(), kSize1
, net::CompletionCallback(), false));
3046 // Lets do another read so we block until both the write and the read
3047 // operation finishes and we can then test for HasOneRef() below.
3048 EXPECT_EQ(net::ERR_IO_PENDING
,
3049 entry2
->ReadData(1, 0, buffer1
.get(), kSize1
, cb
.callback()));
3050 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
3052 // Check that we are not leaking.
3054 static_cast<disk_cache::SimpleEntryImpl
*>(entry2
)->HasOneRef());
3058 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic5
) {
3060 // Create, Doom, Write, Read, Close.
3061 SetSimpleCacheMode();
3063 disk_cache::Entry
* null
= NULL
;
3064 const char key
[] = "the first key";
3066 net::TestCompletionCallback cb
;
3067 const int kSize1
= 10;
3068 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
3069 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
3070 disk_cache::Entry
* entry
= NULL
;
3073 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
3074 EXPECT_NE(null
, entry
);
3075 ScopedEntryPtr
entry_closer(entry
);
3079 net::ERR_IO_PENDING
,
3080 entry
->WriteData(1, 0, buffer1
.get(), kSize1
, cb
.callback(), false));
3081 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
3083 EXPECT_EQ(net::ERR_IO_PENDING
,
3084 entry
->ReadData(1, 0, buffer1
.get(), kSize1
, cb
.callback()));
3085 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
3087 // Check that we are not leaking.
3089 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
3092 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic6
) {
3094 // Create, Write, Doom, Doom, Read, Doom, Close.
3095 SetSimpleCacheMode();
3097 disk_cache::Entry
* null
= NULL
;
3098 const char key
[] = "the first key";
3100 net::TestCompletionCallback cb
;
3101 const int kSize1
= 10;
3102 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
3103 scoped_refptr
<net::IOBuffer
> buffer1_read(new net::IOBuffer(kSize1
));
3104 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
3105 disk_cache::Entry
* entry
= NULL
;
3108 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
3109 EXPECT_NE(null
, entry
);
3110 ScopedEntryPtr
entry_closer(entry
);
3113 net::ERR_IO_PENDING
,
3114 entry
->WriteData(1, 0, buffer1
.get(), kSize1
, cb
.callback(), false));
3115 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
3120 // This Read must not be optimistic, since we don't support that yet.
3121 EXPECT_EQ(net::ERR_IO_PENDING
,
3122 entry
->ReadData(1, 0, buffer1_read
.get(), kSize1
, cb
.callback()));
3123 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
3124 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer1_read
->data(), kSize1
));
3129 // Confirm that IO buffers are not referenced by the Simple Cache after a write
3131 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimisticWriteReleases
) {
3132 SetSimpleCacheMode();
3135 const char key
[] = "the first key";
3136 disk_cache::Entry
* entry
= NULL
;
3138 // First, an optimistic create.
3140 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
3142 ScopedEntryPtr
entry_closer(entry
);
3144 const int kWriteSize
= 512;
3145 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kWriteSize
));
3146 EXPECT_TRUE(buffer1
->HasOneRef());
3147 CacheTestFillBuffer(buffer1
->data(), kWriteSize
, false);
3149 // An optimistic write happens only when there is an empty queue of pending
3150 // operations. To ensure the queue is empty, we issue a write and wait until
3152 EXPECT_EQ(kWriteSize
,
3153 WriteData(entry
, 1, 0, buffer1
.get(), kWriteSize
, false));
3154 EXPECT_TRUE(buffer1
->HasOneRef());
3156 // Finally, we should perform an optimistic write and confirm that all
3157 // references to the IO buffer have been released.
3161 1, 0, buffer1
.get(), kWriteSize
, net::CompletionCallback(), false));
3162 EXPECT_TRUE(buffer1
->HasOneRef());
3165 TEST_F(DiskCacheEntryTest
, SimpleCacheCreateDoomRace
) {
3167 // Create, Doom, Write, Close, Check files are not on disk anymore.
3168 SetSimpleCacheMode();
3170 disk_cache::Entry
* null
= NULL
;
3171 const char key
[] = "the first key";
3173 net::TestCompletionCallback cb
;
3174 const int kSize1
= 10;
3175 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
3176 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
3177 disk_cache::Entry
* entry
= NULL
;
3180 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
3181 EXPECT_NE(null
, entry
);
3183 EXPECT_EQ(net::ERR_IO_PENDING
, cache_
->DoomEntry(key
, cb
.callback()));
3184 EXPECT_EQ(net::OK
, cb
.GetResult(net::ERR_IO_PENDING
));
3188 entry
->WriteData(0, 0, buffer1
.get(), kSize1
, cb
.callback(), false));
3192 // Finish running the pending tasks so that we fully complete the close
3193 // operation and destroy the entry object.
3194 base::MessageLoop::current()->RunUntilIdle();
3196 for (int i
= 0; i
< disk_cache::kSimpleEntryFileCount
; ++i
) {
3197 base::FilePath entry_file_path
= cache_path_
.AppendASCII(
3198 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, i
));
3199 base::File::Info info
;
3200 EXPECT_FALSE(base::GetFileInfo(entry_file_path
, &info
));
3204 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomCreateRace
) {
3205 // This test runs as APP_CACHE to make operations more synchronous. Test
3207 // Create, Doom, Create.
3208 SetCacheType(net::APP_CACHE
);
3209 SetSimpleCacheMode();
3211 disk_cache::Entry
* null
= NULL
;
3212 const char key
[] = "the first key";
3214 net::TestCompletionCallback create_callback
;
3216 disk_cache::Entry
* entry1
= NULL
;
3218 create_callback
.GetResult(
3219 cache_
->CreateEntry(key
, &entry1
, create_callback
.callback())));
3220 ScopedEntryPtr
entry1_closer(entry1
);
3221 EXPECT_NE(null
, entry1
);
3223 net::TestCompletionCallback doom_callback
;
3224 EXPECT_EQ(net::ERR_IO_PENDING
,
3225 cache_
->DoomEntry(key
, doom_callback
.callback()));
3227 disk_cache::Entry
* entry2
= NULL
;
3229 create_callback
.GetResult(
3230 cache_
->CreateEntry(key
, &entry2
, create_callback
.callback())));
3231 ScopedEntryPtr
entry2_closer(entry2
);
3232 EXPECT_EQ(net::OK
, doom_callback
.GetResult(net::ERR_IO_PENDING
));
3235 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomDoom
) {
3237 // Create, Doom, Create, Doom (1st entry), Open.
3238 SetSimpleCacheMode();
3240 disk_cache::Entry
* null
= NULL
;
3242 const char key
[] = "the first key";
3244 disk_cache::Entry
* entry1
= NULL
;
3245 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry1
));
3246 ScopedEntryPtr
entry1_closer(entry1
);
3247 EXPECT_NE(null
, entry1
);
3249 EXPECT_EQ(net::OK
, DoomEntry(key
));
3251 disk_cache::Entry
* entry2
= NULL
;
3252 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry2
));
3253 ScopedEntryPtr
entry2_closer(entry2
);
3254 EXPECT_NE(null
, entry2
);
3256 // Redundantly dooming entry1 should not delete entry2.
3257 disk_cache::SimpleEntryImpl
* simple_entry1
=
3258 static_cast<disk_cache::SimpleEntryImpl
*>(entry1
);
3259 net::TestCompletionCallback cb
;
3261 cb
.GetResult(simple_entry1
->DoomEntry(cb
.callback())));
3263 disk_cache::Entry
* entry3
= NULL
;
3264 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry3
));
3265 ScopedEntryPtr
entry3_closer(entry3
);
3266 EXPECT_NE(null
, entry3
);
3269 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomCreateDoom
) {
3271 // Create, Doom, Create, Doom.
3272 SetSimpleCacheMode();
3275 disk_cache::Entry
* null
= NULL
;
3277 const char key
[] = "the first key";
3279 disk_cache::Entry
* entry1
= NULL
;
3280 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry1
));
3281 ScopedEntryPtr
entry1_closer(entry1
);
3282 EXPECT_NE(null
, entry1
);
3286 disk_cache::Entry
* entry2
= NULL
;
3287 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry2
));
3288 ScopedEntryPtr
entry2_closer(entry2
);
3289 EXPECT_NE(null
, entry2
);
3293 // This test passes if it doesn't crash.
3296 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomCloseCreateCloseOpen
) {
3297 // Test sequence: Create, Doom, Close, Create, Close, Open.
3298 SetSimpleCacheMode();
3301 disk_cache::Entry
* null
= NULL
;
3303 const char key
[] = "this is a key";
3305 disk_cache::Entry
* entry1
= NULL
;
3306 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry1
));
3307 ScopedEntryPtr
entry1_closer(entry1
);
3308 EXPECT_NE(null
, entry1
);
3311 entry1_closer
.reset();
3314 disk_cache::Entry
* entry2
= NULL
;
3315 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry2
));
3316 ScopedEntryPtr
entry2_closer(entry2
);
3317 EXPECT_NE(null
, entry2
);
3319 entry2_closer
.reset();
3322 disk_cache::Entry
* entry3
= NULL
;
3323 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry3
));
3324 ScopedEntryPtr
entry3_closer(entry3
);
3325 EXPECT_NE(null
, entry3
);
3328 // Checks that an optimistic Create would fail later on a racing Open.
3329 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimisticCreateFailsOnOpen
) {
3330 SetSimpleCacheMode();
3333 // Create a corrupt file in place of a future entry. Optimistic create should
3334 // initially succeed, but realize later that creation failed.
3335 const std::string key
= "the key";
3336 net::TestCompletionCallback cb
;
3337 disk_cache::Entry
* entry
= NULL
;
3338 disk_cache::Entry
* entry2
= NULL
;
3340 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3342 EXPECT_EQ(net::OK
, cache_
->CreateEntry(key
, &entry
, cb
.callback()));
3344 ScopedEntryPtr
entry_closer(entry
);
3345 ASSERT_NE(net::OK
, OpenEntry(key
, &entry2
));
3347 // Check that we are not leaking.
3349 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
3351 DisableIntegrityCheck();
3354 // Tests that old entries are evicted while new entries remain in the index.
3355 // This test relies on non-mandatory properties of the simple Cache Backend:
3356 // LRU eviction, specific values of high-watermark and low-watermark etc.
3357 // When changing the eviction algorithm, the test will have to be re-engineered.
3358 TEST_F(DiskCacheEntryTest
, SimpleCacheEvictOldEntries
) {
3359 const int kMaxSize
= 200 * 1024;
3360 const int kWriteSize
= kMaxSize
/ 10;
3361 const int kNumExtraEntries
= 12;
3362 SetSimpleCacheMode();
3363 SetMaxSize(kMaxSize
);
3366 std::string
key1("the first key");
3367 disk_cache::Entry
* entry
;
3368 ASSERT_EQ(net::OK
, CreateEntry(key1
, &entry
));
3369 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kWriteSize
));
3370 CacheTestFillBuffer(buffer
->data(), kWriteSize
, false);
3371 EXPECT_EQ(kWriteSize
,
3372 WriteData(entry
, 1, 0, buffer
.get(), kWriteSize
, false));
3376 std::string
key2("the key prefix");
3377 for (int i
= 0; i
< kNumExtraEntries
; i
++) {
3378 if (i
== kNumExtraEntries
- 2) {
3379 // Create a distinct timestamp for the last two entries. These entries
3380 // will be checked for outliving the eviction.
3383 ASSERT_EQ(net::OK
, CreateEntry(key2
+ base::IntToString(i
), &entry
));
3384 ScopedEntryPtr
entry_closer(entry
);
3385 EXPECT_EQ(kWriteSize
,
3386 WriteData(entry
, 1, 0, buffer
.get(), kWriteSize
, false));
3389 // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
3390 // the internal knowledge about |SimpleBackendImpl|.
3391 ASSERT_NE(net::OK
, OpenEntry(key1
, &entry
))
3392 << "Should have evicted the old entry";
3393 for (int i
= 0; i
< 2; i
++) {
3394 int entry_no
= kNumExtraEntries
- i
- 1;
3395 // Generally there is no guarantee that at this point the backround eviction
3396 // is finished. We are testing the positive case, i.e. when the eviction
3397 // never reaches this entry, should be non-flaky.
3398 ASSERT_EQ(net::OK
, OpenEntry(key2
+ base::IntToString(entry_no
), &entry
))
3399 << "Should not have evicted fresh entry " << entry_no
;
3404 // Tests that if a read and a following in-flight truncate are both in progress
3405 // simultaniously that they both can occur successfully. See
3406 // http://crbug.com/239223
3407 TEST_F(DiskCacheEntryTest
, SimpleCacheInFlightTruncate
) {
3408 SetSimpleCacheMode();
3411 const char key
[] = "the first key";
3413 const int kBufferSize
= 1024;
3414 scoped_refptr
<net::IOBuffer
> write_buffer(new net::IOBuffer(kBufferSize
));
3415 CacheTestFillBuffer(write_buffer
->data(), kBufferSize
, false);
3417 disk_cache::Entry
* entry
= NULL
;
3418 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3420 EXPECT_EQ(kBufferSize
,
3421 WriteData(entry
, 1, 0, write_buffer
.get(), kBufferSize
, false));
3425 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3426 ScopedEntryPtr
entry_closer(entry
);
3428 MessageLoopHelper helper
;
3431 // Make a short read.
3432 const int kReadBufferSize
= 512;
3433 scoped_refptr
<net::IOBuffer
> read_buffer(new net::IOBuffer(kReadBufferSize
));
3434 CallbackTest
read_callback(&helper
, false);
3435 EXPECT_EQ(net::ERR_IO_PENDING
,
3440 base::Bind(&CallbackTest::Run
,
3441 base::Unretained(&read_callback
))));
3444 // Truncate the entry to the length of that read.
3445 scoped_refptr
<net::IOBuffer
>
3446 truncate_buffer(new net::IOBuffer(kReadBufferSize
));
3447 CacheTestFillBuffer(truncate_buffer
->data(), kReadBufferSize
, false);
3448 CallbackTest
truncate_callback(&helper
, false);
3449 EXPECT_EQ(net::ERR_IO_PENDING
,
3452 truncate_buffer
.get(),
3454 base::Bind(&CallbackTest::Run
,
3455 base::Unretained(&truncate_callback
)),
3459 // Wait for both the read and truncation to finish, and confirm that both
3461 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
3462 EXPECT_EQ(kReadBufferSize
, read_callback
.last_result());
3463 EXPECT_EQ(kReadBufferSize
, truncate_callback
.last_result());
3465 memcmp(write_buffer
->data(), read_buffer
->data(), kReadBufferSize
));
3468 // Tests that if a write and a read dependant on it are both in flight
3469 // simultaneiously that they both can complete successfully without erroneous
3470 // early returns. See http://crbug.com/239223
3471 TEST_F(DiskCacheEntryTest
, SimpleCacheInFlightRead
) {
3472 SetSimpleCacheMode();
3475 const char key
[] = "the first key";
3476 disk_cache::Entry
* entry
= NULL
;
3478 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
3479 ScopedEntryPtr
entry_closer(entry
);
3481 const int kBufferSize
= 1024;
3482 scoped_refptr
<net::IOBuffer
> write_buffer(new net::IOBuffer(kBufferSize
));
3483 CacheTestFillBuffer(write_buffer
->data(), kBufferSize
, false);
3485 MessageLoopHelper helper
;
3488 CallbackTest
write_callback(&helper
, false);
3489 EXPECT_EQ(net::ERR_IO_PENDING
,
3494 base::Bind(&CallbackTest::Run
,
3495 base::Unretained(&write_callback
)),
3499 scoped_refptr
<net::IOBuffer
> read_buffer(new net::IOBuffer(kBufferSize
));
3500 CallbackTest
read_callback(&helper
, false);
3501 EXPECT_EQ(net::ERR_IO_PENDING
,
3506 base::Bind(&CallbackTest::Run
,
3507 base::Unretained(&read_callback
))));
3510 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
3511 EXPECT_EQ(kBufferSize
, write_callback
.last_result());
3512 EXPECT_EQ(kBufferSize
, read_callback
.last_result());
3513 EXPECT_EQ(0, memcmp(write_buffer
->data(), read_buffer
->data(), kBufferSize
));
3516 TEST_F(DiskCacheEntryTest
, SimpleCacheOpenCreateRaceWithNoIndex
) {
3517 SetSimpleCacheMode();
3518 DisableSimpleCacheWaitForIndex();
3519 DisableIntegrityCheck();
3522 // Assume the index is not initialized, which is likely, since we are blocking
3523 // the IO thread from executing the index finalization step.
3524 disk_cache::Entry
* entry1
;
3525 net::TestCompletionCallback cb1
;
3526 disk_cache::Entry
* entry2
;
3527 net::TestCompletionCallback cb2
;
3528 int rv1
= cache_
->OpenEntry("key", &entry1
, cb1
.callback());
3529 int rv2
= cache_
->CreateEntry("key", &entry2
, cb2
.callback());
3531 EXPECT_EQ(net::ERR_FAILED
, cb1
.GetResult(rv1
));
3532 ASSERT_EQ(net::OK
, cb2
.GetResult(rv2
));
3536 // Checking one more scenario of overlapped reading of a bad entry.
3537 // Differs from the |SimpleCacheMultipleReadersCheckCRC| only by the order of
3539 TEST_F(DiskCacheEntryTest
, SimpleCacheMultipleReadersCheckCRC2
) {
3540 SetSimpleCacheMode();
3543 const char key
[] = "key";
3545 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key
, &size
));
3547 scoped_refptr
<net::IOBuffer
> read_buffer1(new net::IOBuffer(size
));
3548 scoped_refptr
<net::IOBuffer
> read_buffer2(new net::IOBuffer(size
));
3550 // Advance the first reader a little.
3551 disk_cache::Entry
* entry
= NULL
;
3552 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3553 ScopedEntryPtr
entry_closer(entry
);
3554 EXPECT_EQ(1, ReadData(entry
, 1, 0, read_buffer1
.get(), 1));
3556 // Advance the 2nd reader by the same amount.
3557 disk_cache::Entry
* entry2
= NULL
;
3558 EXPECT_EQ(net::OK
, OpenEntry(key
, &entry2
));
3559 ScopedEntryPtr
entry2_closer(entry2
);
3560 EXPECT_EQ(1, ReadData(entry2
, 1, 0, read_buffer2
.get(), 1));
3562 // Continue reading 1st.
3563 EXPECT_GT(0, ReadData(entry
, 1, 1, read_buffer1
.get(), size
));
3565 // This read should fail as well because we have previous read failures.
3566 EXPECT_GT(0, ReadData(entry2
, 1, 1, read_buffer2
.get(), 1));
3567 DisableIntegrityCheck();
3570 // Test if we can sequentially read each subset of the data until all the data
3571 // is read, then the CRC is calculated correctly and the reads are successful.
3572 TEST_F(DiskCacheEntryTest
, SimpleCacheReadCombineCRC
) {
3574 // Create, Write, Read (first half of data), Read (second half of data),
3576 SetSimpleCacheMode();
3578 disk_cache::Entry
* null
= NULL
;
3579 const char key
[] = "the first key";
3581 const int kHalfSize
= 200;
3582 const int kSize
= 2 * kHalfSize
;
3583 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
3584 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
3585 disk_cache::Entry
* entry
= NULL
;
3587 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3588 EXPECT_NE(null
, entry
);
3590 EXPECT_EQ(kSize
, WriteData(entry
, 1, 0, buffer1
.get(), kSize
, false));
3593 disk_cache::Entry
* entry2
= NULL
;
3594 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry2
));
3595 EXPECT_EQ(entry
, entry2
);
3597 // Read the first half of the data.
3599 int buf_len
= kHalfSize
;
3600 scoped_refptr
<net::IOBuffer
> buffer1_read1(new net::IOBuffer(buf_len
));
3601 EXPECT_EQ(buf_len
, ReadData(entry2
, 1, offset
, buffer1_read1
.get(), buf_len
));
3602 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer1_read1
->data(), buf_len
));
3604 // Read the second half of the data.
3606 buf_len
= kHalfSize
;
3607 scoped_refptr
<net::IOBuffer
> buffer1_read2(new net::IOBuffer(buf_len
));
3608 EXPECT_EQ(buf_len
, ReadData(entry2
, 1, offset
, buffer1_read2
.get(), buf_len
));
3609 char* buffer1_data
= buffer1
->data() + offset
;
3610 EXPECT_EQ(0, memcmp(buffer1_data
, buffer1_read2
->data(), buf_len
));
3612 // Check that we are not leaking.
3613 EXPECT_NE(entry
, null
);
3615 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
3620 // Test if we can write the data not in sequence and read correctly. In
3621 // this case the CRC will not be present.
3622 TEST_F(DiskCacheEntryTest
, SimpleCacheNonSequentialWrite
) {
3624 // Create, Write (second half of data), Write (first half of data), Read,
3626 SetSimpleCacheMode();
3628 disk_cache::Entry
* null
= NULL
;
3629 const char key
[] = "the first key";
3631 const int kHalfSize
= 200;
3632 const int kSize
= 2 * kHalfSize
;
3633 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
3634 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
3635 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
3636 char* buffer1_data
= buffer1
->data() + kHalfSize
;
3637 memcpy(buffer2
->data(), buffer1_data
, kHalfSize
);
3639 disk_cache::Entry
* entry
= NULL
;
3640 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3642 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
3643 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3644 EXPECT_NE(null
, entry
);
3646 int offset
= kHalfSize
;
3647 int buf_len
= kHalfSize
;
3650 WriteData(entry
, i
, offset
, buffer2
.get(), buf_len
, false));
3652 buf_len
= kHalfSize
;
3654 WriteData(entry
, i
, offset
, buffer1
.get(), buf_len
, false));
3657 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3659 scoped_refptr
<net::IOBuffer
> buffer1_read1(new net::IOBuffer(kSize
));
3660 EXPECT_EQ(kSize
, ReadData(entry
, i
, 0, buffer1_read1
.get(), kSize
));
3661 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer1_read1
->data(), kSize
));
3662 // Check that we are not leaking.
3663 ASSERT_NE(entry
, null
);
3664 EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
3669 // Test that changing stream1 size does not affect stream0 (stream0 and stream1
3670 // are stored in the same file in Simple Cache).
3671 TEST_F(DiskCacheEntryTest
, SimpleCacheStream1SizeChanges
) {
3672 SetSimpleCacheMode();
3674 disk_cache::Entry
* entry
= NULL
;
3675 const char key
[] = "the key";
3676 const int kSize
= 100;
3677 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
3678 scoped_refptr
<net::IOBuffer
> buffer_read(new net::IOBuffer(kSize
));
3679 CacheTestFillBuffer(buffer
->data(), kSize
, false);
3681 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3684 // Write something into stream0.
3685 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
3686 EXPECT_EQ(kSize
, ReadData(entry
, 0, 0, buffer_read
.get(), kSize
));
3687 EXPECT_EQ(0, memcmp(buffer
->data(), buffer_read
->data(), kSize
));
3691 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3692 int stream1_size
= 100;
3693 EXPECT_EQ(0, WriteData(entry
, 1, stream1_size
, buffer
.get(), 0, false));
3694 EXPECT_EQ(stream1_size
, entry
->GetDataSize(1));
3697 // Check that stream0 data has not been modified and that the EOF record for
3698 // stream 0 contains a crc.
3699 // The entry needs to be reopened before checking the crc: Open will perform
3700 // the synchronization with the previous Close. This ensures the EOF records
3701 // have been written to disk before we attempt to read them independently.
3702 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3703 base::FilePath entry_file0_path
= cache_path_
.AppendASCII(
3704 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, 0));
3705 base::File
entry_file0(entry_file0_path
,
3706 base::File::FLAG_READ
| base::File::FLAG_OPEN
);
3707 ASSERT_TRUE(entry_file0
.IsValid());
3709 int data_size
[disk_cache::kSimpleEntryStreamCount
] = {kSize
, stream1_size
, 0};
3710 int sparse_data_size
= 0;
3711 disk_cache::SimpleEntryStat
entry_stat(
3712 base::Time::Now(), base::Time::Now(), data_size
, sparse_data_size
);
3713 int eof_offset
= entry_stat
.GetEOFOffsetInFile(key
, 0);
3714 disk_cache::SimpleFileEOF eof_record
;
3715 ASSERT_EQ(static_cast<int>(sizeof(eof_record
)),
3716 entry_file0
.Read(eof_offset
, reinterpret_cast<char*>(&eof_record
),
3717 sizeof(eof_record
)));
3718 EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber
, eof_record
.final_magic_number
);
3719 EXPECT_TRUE((eof_record
.flags
& disk_cache::SimpleFileEOF::FLAG_HAS_CRC32
) ==
3720 disk_cache::SimpleFileEOF::FLAG_HAS_CRC32
);
3722 buffer_read
= new net::IOBuffer(kSize
);
3723 EXPECT_EQ(kSize
, ReadData(entry
, 0, 0, buffer_read
.get(), kSize
));
3724 EXPECT_EQ(0, memcmp(buffer
->data(), buffer_read
->data(), kSize
));
3728 EXPECT_EQ(0, WriteData(entry
, 1, stream1_size
, buffer
.get(), 0, true));
3729 EXPECT_EQ(stream1_size
, entry
->GetDataSize(1));
3732 // Check that stream0 data has not been modified.
3733 buffer_read
= new net::IOBuffer(kSize
);
3734 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3735 EXPECT_EQ(kSize
, ReadData(entry
, 0, 0, buffer_read
.get(), kSize
));
3736 EXPECT_EQ(0, memcmp(buffer
->data(), buffer_read
->data(), kSize
));
3741 // Test that writing within the range for which the crc has already been
3742 // computed will properly invalidate the computed crc.
3743 TEST_F(DiskCacheEntryTest
, SimpleCacheCRCRewrite
) {
3745 // Create, Write (big data), Write (small data in the middle), Close.
3746 // Open, Read (all), Close.
3747 SetSimpleCacheMode();
3749 disk_cache::Entry
* null
= NULL
;
3750 const char key
[] = "the first key";
3752 const int kHalfSize
= 200;
3753 const int kSize
= 2 * kHalfSize
;
3754 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
3755 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kHalfSize
));
3756 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
3757 CacheTestFillBuffer(buffer2
->data(), kHalfSize
, false);
3759 disk_cache::Entry
* entry
= NULL
;
3760 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3761 EXPECT_NE(null
, entry
);
3764 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
3765 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3767 int buf_len
= kSize
;
3770 WriteData(entry
, i
, offset
, buffer1
.get(), buf_len
, false));
3772 buf_len
= kHalfSize
;
3774 WriteData(entry
, i
, offset
, buffer2
.get(), buf_len
, false));
3777 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3779 scoped_refptr
<net::IOBuffer
> buffer1_read1(new net::IOBuffer(kSize
));
3780 EXPECT_EQ(kSize
, ReadData(entry
, i
, 0, buffer1_read1
.get(), kSize
));
3781 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer1_read1
->data(), kHalfSize
));
3784 memcmp(buffer2
->data(), buffer1_read1
->data() + kHalfSize
, kHalfSize
));
3790 bool DiskCacheEntryTest::SimpleCacheThirdStreamFileExists(const char* key
) {
3791 int third_stream_file_index
=
3792 disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
3793 base::FilePath third_stream_file_path
= cache_path_
.AppendASCII(
3794 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
3795 key
, third_stream_file_index
));
3796 return PathExists(third_stream_file_path
);
3799 void DiskCacheEntryTest::SyncDoomEntry(const char* key
) {
3800 net::TestCompletionCallback callback
;
3801 cache_
->DoomEntry(key
, callback
.callback());
3802 callback
.WaitForResult();
3805 // Check that a newly-created entry with no third-stream writes omits the
3806 // third stream file.
3807 TEST_F(DiskCacheEntryTest
, SimpleCacheOmittedThirdStream1
) {
3808 SetSimpleCacheMode();
3811 const char key
[] = "key";
3813 disk_cache::Entry
* entry
;
3815 // Create entry and close without writing: third stream file should be
3816 // omitted, since the stream is empty.
3817 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3819 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key
));
3822 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key
));
3825 // Check that a newly-created entry with only a single zero-offset, zero-length
3826 // write omits the third stream file.
3827 TEST_F(DiskCacheEntryTest
, SimpleCacheOmittedThirdStream2
) {
3828 SetSimpleCacheMode();
3831 const int kHalfSize
= 8;
3832 const int kSize
= kHalfSize
* 2;
3833 const char key
[] = "key";
3834 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
3835 CacheTestFillBuffer(buffer
->data(), kHalfSize
, false);
3837 disk_cache::Entry
* entry
;
3839 // Create entry, write empty buffer to third stream, and close: third stream
3840 // should still be omitted, since the entry ignores writes that don't modify
3841 // data or change the length.
3842 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3843 EXPECT_EQ(0, WriteData(entry
, 2, 0, buffer
.get(), 0, true));
3845 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key
));
3848 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key
));
3851 // Check that we can read back data written to the third stream.
3852 TEST_F(DiskCacheEntryTest
, SimpleCacheOmittedThirdStream3
) {
3853 SetSimpleCacheMode();
3856 const int kHalfSize
= 8;
3857 const int kSize
= kHalfSize
* 2;
3858 const char key
[] = "key";
3859 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
3860 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
3861 CacheTestFillBuffer(buffer1
->data(), kHalfSize
, false);
3863 disk_cache::Entry
* entry
;
3865 // Create entry, write data to third stream, and close: third stream should
3866 // not be omitted, since it contains data. Re-open entry and ensure there
3867 // are that many bytes in the third stream.
3868 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3869 EXPECT_EQ(kHalfSize
, WriteData(entry
, 2, 0, buffer1
.get(), kHalfSize
, true));
3871 EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key
));
3873 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3874 EXPECT_EQ(kHalfSize
, ReadData(entry
, 2, 0, buffer2
.get(), kSize
));
3875 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer2
->data(), kHalfSize
));
3877 EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key
));
3880 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key
));
3883 // Check that we remove the third stream file upon opening an entry and finding
3884 // the third stream empty. (This is the upgrade path for entries written
3885 // before the third stream was optional.)
3886 TEST_F(DiskCacheEntryTest
, SimpleCacheOmittedThirdStream4
) {
3887 SetSimpleCacheMode();
3890 const int kHalfSize
= 8;
3891 const int kSize
= kHalfSize
* 2;
3892 const char key
[] = "key";
3893 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
3894 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
3895 CacheTestFillBuffer(buffer1
->data(), kHalfSize
, false);
3897 disk_cache::Entry
* entry
;
3899 // Create entry, write data to third stream, truncate third stream back to
3900 // empty, and close: third stream will not initially be omitted, since entry
3901 // creates the file when the first significant write comes in, and only
3902 // removes it on open if it is empty. Reopen, ensure that the file is
3903 // deleted, and that there's no data in the third stream.
3904 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3905 EXPECT_EQ(kHalfSize
, WriteData(entry
, 2, 0, buffer1
.get(), kHalfSize
, true));
3906 EXPECT_EQ(0, WriteData(entry
, 2, 0, buffer1
.get(), 0, true));
3908 EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key
));
3910 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3911 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key
));
3912 EXPECT_EQ(0, ReadData(entry
, 2, 0, buffer2
.get(), kSize
));
3914 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key
));
3917 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key
));
3920 // Check that we don't accidentally create the third stream file once the entry
3922 TEST_F(DiskCacheEntryTest
, SimpleCacheOmittedThirdStream5
) {
3923 SetSimpleCacheMode();
3926 const int kHalfSize
= 8;
3927 const int kSize
= kHalfSize
* 2;
3928 const char key
[] = "key";
3929 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
3930 CacheTestFillBuffer(buffer
->data(), kHalfSize
, false);
3932 disk_cache::Entry
* entry
;
3934 // Create entry, doom entry, write data to third stream, and close: third
3935 // stream should not exist. (Note: We don't care if the write fails, just
3936 // that it doesn't cause the file to be created on disk.)
3937 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3939 WriteData(entry
, 2, 0, buffer
.get(), kHalfSize
, true);
3941 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key
));
3944 // There could be a race between Doom and an optimistic write.
3945 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomOptimisticWritesRace
) {
3947 // Create, first Write, second Write, Close.
3949 SetSimpleCacheMode();
3951 disk_cache::Entry
* null
= NULL
;
3952 const char key
[] = "the first key";
3954 const int kSize
= 200;
3955 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
3956 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
3957 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
3958 CacheTestFillBuffer(buffer2
->data(), kSize
, false);
3960 // The race only happens on stream 1 and stream 2.
3961 for (int i
= 0; i
< disk_cache::kSimpleEntryStreamCount
; ++i
) {
3962 ASSERT_EQ(net::OK
, DoomAllEntries());
3963 disk_cache::Entry
* entry
= NULL
;
3965 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3966 EXPECT_NE(null
, entry
);
3970 ASSERT_EQ(net::OK
, DoomAllEntries());
3971 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3972 EXPECT_NE(null
, entry
);
3975 int buf_len
= kSize
;
3976 // This write should not be optimistic (since create is).
3978 WriteData(entry
, i
, offset
, buffer1
.get(), buf_len
, false));
3981 // This write should be optimistic.
3983 WriteData(entry
, i
, offset
, buffer2
.get(), buf_len
, false));
3986 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3987 EXPECT_NE(null
, entry
);
3994 // Tests for a regression in crbug.com/317138 , in which deleting an already
3995 // doomed entry was removing the active entry from the index.
3996 TEST_F(DiskCacheEntryTest
, SimpleCachePreserveActiveEntries
) {
3997 SetSimpleCacheMode();
4000 disk_cache::Entry
* null
= NULL
;
4002 const char key
[] = "this is a key";
4004 disk_cache::Entry
* entry1
= NULL
;
4005 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry1
));
4006 ScopedEntryPtr
entry1_closer(entry1
);
4007 EXPECT_NE(null
, entry1
);
4010 disk_cache::Entry
* entry2
= NULL
;
4011 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry2
));
4012 ScopedEntryPtr
entry2_closer(entry2
);
4013 EXPECT_NE(null
, entry2
);
4014 entry2_closer
.reset();
4016 // Closing then reopening entry2 insures that entry2 is serialized, and so
4017 // it can be opened from files without error.
4019 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry2
));
4020 EXPECT_NE(null
, entry2
);
4021 entry2_closer
.reset(entry2
);
4023 scoped_refptr
<disk_cache::SimpleEntryImpl
>
4024 entry1_refptr
= static_cast<disk_cache::SimpleEntryImpl
*>(entry1
);
4026 // If crbug.com/317138 has regressed, this will remove |entry2| from
4027 // the backend's |active_entries_| while |entry2| is still alive and its
4028 // files are still on disk.
4029 entry1_closer
.reset();
4032 // Close does not have a callback. However, we need to be sure the close is
4033 // finished before we continue the test. We can take advantage of how the ref
4034 // counting of a SimpleEntryImpl works to fake out a callback: When the
4035 // last Close() call is made to an entry, an IO operation is sent to the
4036 // synchronous entry to close the platform files. This IO operation holds a
4037 // ref pointer to the entry, which expires when the operation is done. So,
4038 // we take a refpointer, and watch the SimpleEntry object until it has only
4039 // one ref; this indicates the IO operation is complete.
4040 while (!entry1_refptr
->HasOneRef()) {
4041 base::PlatformThread::YieldCurrentThread();
4042 base::MessageLoop::current()->RunUntilIdle();
4044 entry1_refptr
= NULL
;
4046 // In the bug case, this new entry ends up being a duplicate object pointing
4047 // at the same underlying files.
4048 disk_cache::Entry
* entry3
= NULL
;
4049 EXPECT_EQ(net::OK
, OpenEntry(key
, &entry3
));
4050 ScopedEntryPtr
entry3_closer(entry3
);
4051 EXPECT_NE(null
, entry3
);
4053 // The test passes if these two dooms do not crash.
4058 TEST_F(DiskCacheEntryTest
, SimpleCacheBasicSparseIO
) {
4059 SetSimpleCacheMode();
4064 TEST_F(DiskCacheEntryTest
, SimpleCacheHugeSparseIO
) {
4065 SetSimpleCacheMode();
4070 TEST_F(DiskCacheEntryTest
, SimpleCacheGetAvailableRange
) {
4071 SetSimpleCacheMode();
4073 GetAvailableRange();
4076 TEST_F(DiskCacheEntryTest
, SimpleCacheUpdateSparseEntry
) {
4077 SetSimpleCacheMode();
4079 UpdateSparseEntry();
4082 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomSparseEntry
) {
4083 SetSimpleCacheMode();
4088 TEST_F(DiskCacheEntryTest
, SimpleCachePartialSparseEntry
) {
4089 SetSimpleCacheMode();
4091 PartialSparseEntry();
4094 TEST_F(DiskCacheEntryTest
, SimpleCacheTruncateLargeSparseFile
) {
4095 const int kSize
= 1024;
4097 SetSimpleCacheMode();
4098 // An entry is allowed sparse data 1/10 the size of the cache, so this size
4099 // allows for one |kSize|-sized range plus overhead, but not two ranges.
4100 SetMaxSize(kSize
* 15);
4103 const char key
[] = "key";
4104 disk_cache::Entry
* null
= NULL
;
4105 disk_cache::Entry
* entry
;
4106 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
4107 EXPECT_NE(null
, entry
);
4109 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
4110 CacheTestFillBuffer(buffer
->data(), kSize
, false);
4111 net::TestCompletionCallback callback
;
4114 // Verify initial conditions.
4115 ret
= entry
->ReadSparseData(0, buffer
.get(), kSize
, callback
.callback());
4116 EXPECT_EQ(0, callback
.GetResult(ret
));
4118 ret
= entry
->ReadSparseData(kSize
, buffer
.get(), kSize
, callback
.callback());
4119 EXPECT_EQ(0, callback
.GetResult(ret
));
4121 // Write a range and make sure it reads back.
4122 ret
= entry
->WriteSparseData(0, buffer
.get(), kSize
, callback
.callback());
4123 EXPECT_EQ(kSize
, callback
.GetResult(ret
));
4125 ret
= entry
->ReadSparseData(0, buffer
.get(), kSize
, callback
.callback());
4126 EXPECT_EQ(kSize
, callback
.GetResult(ret
));
4128 // Write another range and make sure it reads back.
4129 ret
= entry
->WriteSparseData(kSize
, buffer
.get(), kSize
, callback
.callback());
4130 EXPECT_EQ(kSize
, callback
.GetResult(ret
));
4132 ret
= entry
->ReadSparseData(kSize
, buffer
.get(), kSize
, callback
.callback());
4133 EXPECT_EQ(kSize
, callback
.GetResult(ret
));
4135 // Make sure the first range was removed when the second was written.
4136 ret
= entry
->ReadSparseData(0, buffer
.get(), kSize
, callback
.callback());
4137 EXPECT_EQ(0, callback
.GetResult(ret
));