// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/basictypes.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/platform_thread.h"
#include "base/timer/timer.h"
#include "net/base/completion_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/mem_entry_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "testing/gtest/include/gtest/gtest.h"

using disk_cache::ScopedEntryPtr;
// Tests that can run with different types of caches.
class DiskCacheEntryTest : public DiskCacheTestWithCache {
 public:
  void InternalSyncIOBackground(disk_cache::Entry* entry);
  void ExternalSyncIOBackground(disk_cache::Entry* entry);

 protected:
  void InternalSyncIO();
  void InternalAsyncIO();
  void ExternalSyncIO();
  void ExternalAsyncIO();
  void ReleaseBuffer(int stream_index);
  void GetTimes(int stream_index);
  void GrowData(int stream_index);
  void TruncateData(int stream_index);
  void ZeroLengthIO(int stream_index);
  void SizeChanges(int stream_index);
  void ReuseEntry(int size, int stream_index);
  void InvalidData(int stream_index);
  void ReadWriteDestroyBuffer(int stream_index);
  void DoomNormalEntry();
  void DoomEntryNextToOpenEntry();
  void DoomedEntry(int stream_index);
  void GetAvailableRange();
  void UpdateSparseEntry();
  void DoomSparseEntry();
  void PartialSparseEntry();
  bool SimpleCacheMakeBadChecksumEntry(const std::string& key, int* data_size);
  bool SimpleCacheThirdStreamFileExists(const char* key);
  void SyncDoomEntry(const char* key);
};
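
// Each method declared above holds the body of a test; the TEST_F wrappers
// further down configure a particular backend flavor (memory-only, no
// buffering, app cache, shader cache, ...) and then invoke the corresponding
// method, so the same scenario runs against every kind of cache.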
// This part of the test runs on the background thread.
void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) {
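  // Every call below passes a null net::CompletionCallback(), so each
  // ReadData/WriteData completes synchronously on this (cache) thread.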
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  EXPECT_EQ(0, entry->ReadData(0, 0, buffer1.get(), kSize1,
                               net::CompletionCallback()));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(10, entry->WriteData(0, 0, buffer1.get(), kSize1,
                                 net::CompletionCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(10, entry->ReadData(0, 0, buffer1.get(), kSize1,
                                net::CompletionCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  memset(buffer3->data(), 0, kSize3);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  EXPECT_EQ(5000, entry->WriteData(1, 1500, buffer2.get(), kSize2,
                                   net::CompletionCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  EXPECT_EQ(4989, entry->ReadData(1, 1511, buffer2.get(), kSize2,
                                  net::CompletionCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(5000, entry->ReadData(1, 0, buffer2.get(), kSize2,
                                  net::CompletionCallback()));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  EXPECT_EQ(1500, entry->ReadData(1, 5000, buffer2.get(), kSize2,
                                  net::CompletionCallback()));
  EXPECT_EQ(0, entry->ReadData(1, 6500, buffer2.get(), kSize2,
                               net::CompletionCallback()));
  EXPECT_EQ(6500, entry->ReadData(1, 0, buffer3.get(), kSize3,
                                  net::CompletionCallback()));
  EXPECT_EQ(8192, entry->WriteData(1, 0, buffer3.get(), 8192,
                                   net::CompletionCallback(), false));
  EXPECT_EQ(8192, entry->ReadData(1, 0, buffer3.get(), kSize3,
                                  net::CompletionCallback()));
  EXPECT_EQ(8192, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(0, 0, NULL, 0, net::CompletionCallback(),
                                true));
  EXPECT_EQ(0, entry->WriteData(1, 0, NULL, 0, net::CompletionCallback(),
                                true));
}
// We need to support synchronous IO even though it is not a supported
// operation from the point of view of the disk cache's public interface,
// because we use it internally, not just in a few tests but as part of the
// implementation (see sparse_control.cc, for example).
void DiskCacheEntryTest::InternalSyncIO() {
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  // The bulk of the test runs from within the callback, on the cache thread.
  RunTaskForTest(base::Bind(&DiskCacheEntryTest::InternalSyncIOBackground,
                            base::Unretained(this),
                            entry));

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
TEST_F(DiskCacheEntryTest, InternalSyncIO) {
  InitCache();
  InternalSyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalSyncIO();
}
void DiskCacheEntryTest::InternalAsyncIO() {
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  // Avoid using internal buffers for the test. We have to write something to
  // the entry and close it so that we flush the internal buffer to disk. After
  // that, IO operations will be really hitting the disk. We don't care about
  // the content, so just extending the entry is enough (all extensions
  // zero-fill any holes).
  EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, NULL, 0, false));
  EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);
  CallbackTest callback10(&helper, false);
  CallbackTest callback11(&helper, false);
  CallbackTest callback12(&helper, false);
  CallbackTest callback13(&helper, false);

  const int kSize1 = 10;
  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);
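
  // Pattern for every asynchronous call below: the return value is either the
  // number of bytes transferred (the operation completed synchronously) or
  // net::ERR_IO_PENDING; each pending operation bumps |expected| so that
  // WaitUntilCacheIoFinished() knows how many callbacks to wait for.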
  EXPECT_EQ(0, entry->ReadData(0, 15 * 1024, buffer1.get(), kSize1,
                               base::Bind(&CallbackTest::Run,
                                          base::Unretained(&callback1))));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  int expected = 0;
  int ret = entry->WriteData(0, 0, buffer1.get(), kSize1,
                             base::Bind(&CallbackTest::Run,
                                        base::Unretained(&callback2)),
                             false);
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer2->data(), 0, kSize2);
  ret = entry->ReadData(0, 0, buffer2.get(), kSize1,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback3)));
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry->WriteData(1, 1500, buffer2.get(), kSize2,
                         base::Bind(&CallbackTest::Run,
                                    base::Unretained(&callback4)),
                         true);
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  ret = entry->ReadData(1, 1511, buffer3.get(), kSize2,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback5)));
  EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry->ReadData(1, 0, buffer2.get(), kSize2,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  memset(buffer3->data(), 0, kSize3);
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  ret = entry->ReadData(1, 5000, buffer2.get(), kSize2,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback7)));
  EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(1, 0, buffer3.get(), kSize3,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback9)));
  EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->WriteData(1, 0, buffer3.get(), 8192,
                         base::Bind(&CallbackTest::Run,
                                    base::Unretained(&callback10)),
                         false);
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  ret = entry->ReadData(1, 0, buffer3.get(), kSize3,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback11)));
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_EQ(8192, entry->GetDataSize(1));

  ret = entry->ReadData(0, 0, buffer1.get(), kSize1,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback12)));
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(1, 0, buffer2.get(), kSize2,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback13)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
  InitCache();
  InternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalAsyncIO();
}
// This part of the test runs on the background thread.
void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) {
  const int kSize1 = 17000;
  const int kSize2 = 25000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(17000, entry->WriteData(0, 0, buffer1.get(), kSize1,
                                    net::CompletionCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(17000, entry->ReadData(0, 0, buffer1.get(), kSize1,
                                   net::CompletionCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  EXPECT_EQ(25000, entry->WriteData(1, 10000, buffer2.get(), kSize2,
                                    net::CompletionCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  EXPECT_EQ(24989, entry->ReadData(1, 10011, buffer2.get(), kSize2,
                                   net::CompletionCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(25000, entry->ReadData(1, 0, buffer2.get(), kSize2,
                                   net::CompletionCallback()));
  EXPECT_EQ(5000, entry->ReadData(1, 30000, buffer2.get(), kSize2,
                                  net::CompletionCallback()));
  EXPECT_EQ(0, entry->ReadData(1, 35000, buffer2.get(), kSize2,
                               net::CompletionCallback()));
  EXPECT_EQ(17000, entry->ReadData(1, 0, buffer1.get(), kSize1,
                                   net::CompletionCallback()));
  EXPECT_EQ(17000, entry->WriteData(1, 20000, buffer1.get(), kSize1,
                                    net::CompletionCallback(), false));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(0, 0, NULL, 0, net::CompletionCallback(),
                                true));
  EXPECT_EQ(0, entry->WriteData(1, 0, NULL, 0, net::CompletionCallback(),
                                true));
}
void DiskCacheEntryTest::ExternalSyncIO() {
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));

  // The bulk of the test runs from within the callback, on the cache thread.
  RunTaskForTest(base::Bind(&DiskCacheEntryTest::ExternalSyncIOBackground,
                            base::Unretained(this),
                            entry));

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
  InitCache();
  ExternalSyncIO();
}

TEST_F(DiskCacheEntryTest, ExternalSyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalSyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalSyncIO();
}
void DiskCacheEntryTest::ExternalAsyncIO() {
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));

  int expected = 0;

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);

  const int kSize1 = 17000;
  const int kSize2 = 25000;
  const int kSize3 = 25000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);
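
  // The buffers here (17000/25000 bytes) are large enough that the blockfile
  // backend stores this data in external cache files rather than internal
  // block-file records, which is the code path this test targets.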
  base::strlcpy(buffer1->data(), "the data", kSize1);
  int ret = entry->WriteData(0, 0, buffer1.get(), kSize1,
                             base::Bind(&CallbackTest::Run,
                                        base::Unretained(&callback1)),
                             false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer2->data(), 0, kSize1);
  ret = entry->ReadData(0, 0, buffer2.get(), kSize1,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback2)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry->WriteData(1, 10000, buffer2.get(), kSize2,
                         base::Bind(&CallbackTest::Run,
                                    base::Unretained(&callback3)),
                         false);
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer3->data(), 0, kSize3);
  ret = entry->ReadData(1, 10011, buffer3.get(), kSize3,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback4)));
  EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry->ReadData(1, 0, buffer2.get(), kSize2,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback5)));
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));
  ret = entry->ReadData(1, 30000, buffer2.get(), kSize2,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_EQ(0, entry->ReadData(1, 35000, buffer2.get(), kSize2,
                               base::Bind(&CallbackTest::Run,
                                          base::Unretained(&callback7))));
  ret = entry->ReadData(1, 0, buffer1.get(), kSize1,
                        base::Bind(&CallbackTest::Run,
                                   base::Unretained(&callback8)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  ret = entry->WriteData(1, 20000, buffer1.get(), kSize1,
                         base::Bind(&CallbackTest::Run,
                                    base::Unretained(&callback9)),
                         false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
  InitCache();
  ExternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, ExternalAsyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalAsyncIO();
}
// Tests that IOBuffers are not referenced after IO completes.
void DiskCacheEntryTest::ReleaseBuffer(int stream_index) {
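  // ReleaseBufferCompletionCallback fails the test if the cache still holds a
  // reference to |buffer| at the time the completion callback runs.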
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(buffer->data(), kBufferSize, false);

  net::ReleaseBufferCompletionCallback cb(buffer.get());
  int rv = entry->WriteData(
      stream_index, 0, buffer.get(), kBufferSize, cb.callback(), false);
  EXPECT_EQ(kBufferSize, cb.GetResult(rv));
  entry->Close();
}
TEST_F(DiskCacheEntryTest, ReleaseBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ReleaseBuffer(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReleaseBuffer) {
  SetMemoryOnlyMode();
  InitCache();
  ReleaseBuffer(0);
}
void DiskCacheEntryTest::StreamAccess() {
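  // Writes a distinct pattern to each of the three streams, then reads each
  // stream back in a single pass and again in two chunks (including a short
  // read at the end of the stream).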
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  const int kBufferSize = 1024;
  const int kNumStreams = 3;
  scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
  for (int i = 0; i < kNumStreams; i++) {
    reference_buffers[i] = new net::IOBuffer(kBufferSize);
    CacheTestFillBuffer(reference_buffers[i]->data(), kBufferSize, false);
  }
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kBufferSize));
  for (int i = 0; i < kNumStreams; i++) {
    EXPECT_EQ(kBufferSize,
              WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize,
                        false));
    memset(buffer1->data(), 0, kBufferSize);
    EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
    EXPECT_EQ(
        0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
  }
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
  entry->Close();

  // Open the entry and read it in chunks, including a read past the end.
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);
  const int kReadBufferSize = 600;
  const int kFinalReadSize = kBufferSize - kReadBufferSize;
  COMPILE_ASSERT(kFinalReadSize < kReadBufferSize, should_be_exactly_two_reads);
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize));
  for (int i = 0; i < kNumStreams; i++) {
    memset(buffer2->data(), 0, kReadBufferSize);
    EXPECT_EQ(kReadBufferSize,
              ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
    EXPECT_EQ(
        0,
        memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));

    memset(buffer2->data(), 0, kReadBufferSize);
    EXPECT_EQ(kFinalReadSize,
              ReadData(entry, i, kReadBufferSize, buffer2.get(),
                       kReadBufferSize));
    EXPECT_EQ(0,
              memcmp(reference_buffers[i]->data() + kReadBufferSize,
                     buffer2->data(),
                     kFinalReadSize));
  }

  entry->Close();
}
TEST_F(DiskCacheEntryTest, StreamAccess) {
  InitCache();
  StreamAccess();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
  SetMemoryOnlyMode();
  InitCache();
  StreamAccess();
}
void DiskCacheEntryTest::GetKey() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(key, entry->GetKey()) << "short key";
  entry->Close();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);
  char key_buffer[20000];

  CacheTestFillBuffer(key_buffer, 3000, true);
  key_buffer[1000] = '\0';

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
  entry->Close();

  key_buffer[1000] = 'p';
  key_buffer[3000] = '\0';
  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
  key_buffer[19999] = '\0';
  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "long key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, 0x4000, true);
  key_buffer[0x4000] = '\0';
  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
  entry->Close();
}
TEST_F(DiskCacheEntryTest, GetKey) {
  InitCache();
  GetKey();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
  SetMemoryOnlyMode();
  InitCache();
  GetKey();
}
void DiskCacheEntryTest::GetTimes(int stream_index) {
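  // Expectations differ per cache type: APP_CACHE entries do not update their
  // times on writes or reads, SHADER_CACHE entries update them on writes but
  // not on reads, and the default cache also refreshes the last-used time on
  // reads.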
  std::string key("the first key");
  disk_cache::Entry* entry;

  Time t1 = Time::Now();
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(entry->GetLastModified() >= t1);
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t2 = Time::Now();
  EXPECT_TRUE(t2 > t1);
  EXPECT_EQ(0, WriteData(entry, stream_index, 200, NULL, 0, false));
  if (type_ == net::APP_CACHE) {
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else {
    EXPECT_TRUE(entry->GetLastModified() >= t2);
  }
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t3 = Time::Now();
  EXPECT_TRUE(t3 > t2);
  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 0, buffer.get(), kSize));
  if (type_ == net::APP_CACHE) {
    EXPECT_TRUE(entry->GetLastUsed() < t2);
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else if (type_ == net::SHADER_CACHE) {
    EXPECT_TRUE(entry->GetLastUsed() < t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  } else {
    EXPECT_TRUE(entry->GetLastUsed() >= t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  }
  entry->Close();
}
TEST_F(DiskCacheEntryTest, GetTimes) {
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetTimes) {
  SetMemoryOnlyMode();
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, AppCacheGetTimes) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, ShaderCacheGetTimes) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  GetTimes(0);
}
void DiskCacheEntryTest::GrowData(int stream_index) {
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  memset(buffer2->data(), 0, kSize);

  base::strlcpy(buffer1->data(), "the data", kSize);
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, ReadData(entry, stream_index, 0, buffer2.get(), 10));
  EXPECT_STREQ("the data", buffer2->data());
  EXPECT_EQ(10, entry->GetDataSize(stream_index));

  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));

  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  memset(buffer2->data(), 0, kSize);
  std::string key2("Second key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, entry->GetDataSize(stream_index));
  entry->Close();

  // Go from an internal address to a bigger block size.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
  entry->Close();
  memset(buffer2->data(), 0, kSize);

  // Go from an internal address to an external one.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Double check the size from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));

  // Now extend the entry without actual data.
  EXPECT_EQ(0, WriteData(entry, stream_index, 45500, buffer1.get(), 0, false));
  entry->Close();

  // And check again from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(45500, entry->GetDataSize(stream_index));
  entry->Close();
}
TEST_F(DiskCacheEntryTest, GrowData) {
  InitCache();
  GrowData(0);
}

TEST_F(DiskCacheEntryTest, GrowDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  GrowData(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
  SetMemoryOnlyMode();
  InitCache();
  GrowData(0);
}
void DiskCacheEntryTest::TruncateData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple truncation:
  EXPECT_EQ(200, WriteData(entry, stream_index, 0, buffer1.get(), 200, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, true));
  EXPECT_EQ(100, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 50, buffer1.get(), 0, true));
  EXPECT_EQ(50, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Go to an external file.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), 20000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
  memset(buffer2->data(), 0, kSize2);

  // External file truncation
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, true));
  EXPECT_EQ(18000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 17500, buffer1.get(), 0, true));
  EXPECT_EQ(17500, entry->GetDataSize(stream_index));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer2.get(), 600));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
  EXPECT_EQ(1000, ReadData(entry, stream_index, 0, buffer2.get(), 1000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
      << "Preserves previous data";

  // Go from external file to zero length.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));

  entry->Close();
}
TEST_F(DiskCacheEntryTest, TruncateData) {
  InitCache();
  TruncateData(0);
}

TEST_F(DiskCacheEntryTest, TruncateDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  TruncateData(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
  SetMemoryOnlyMode();
  InitCache();
  TruncateData(0);
}
void DiskCacheEntryTest::ZeroLengthIO(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  EXPECT_EQ(0, ReadData(entry, stream_index, 0, NULL, 0));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, NULL, 0, false));

  // This write should extend the entry.
  EXPECT_EQ(0, WriteData(entry, stream_index, 1000, NULL, 0, false));
  EXPECT_EQ(0, ReadData(entry, stream_index, 500, NULL, 0));
  EXPECT_EQ(0, ReadData(entry, stream_index, 2000, NULL, 0));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));

  EXPECT_EQ(0, WriteData(entry, stream_index, 100000, NULL, 0, true));
  EXPECT_EQ(0, ReadData(entry, stream_index, 50000, NULL, 0));
  EXPECT_EQ(100000, entry->GetDataSize(stream_index));

  // Let's verify the actual content.
  const int kSize = 20;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 500, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 5000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 50000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  entry->Close();
}
TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
  InitCache();
  ZeroLengthIO(0);
}

TEST_F(DiskCacheEntryTest, ZeroLengthIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ZeroLengthIO(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
  SetMemoryOnlyMode();
  InitCache();
  ZeroLengthIO(0);
}
// Tests that we handle the content correctly when buffering, a feature of the
// standard cache that permits fast responses to certain reads.
void DiskCacheEntryTest::Buffering() {
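  // The entry is closed and reopened between groups of writes so that part of
  // the data ends up in the backend's in-memory buffer and part on disk, and
  // reads have to combine both.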
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  // Write a little more and read what we wrote before.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Now go to an external file.
  EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
  entry->Close();

  // Write something else and verify old data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Extend the file some more.
  EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
  entry->Close();

  // And now make sure that we can deal with data in both places (ram/disk).
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));

  // We should not overwrite the data at 18000 with this.
  EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  // Extend the file again and read before without closing the entry.
  EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  entry->Close();
}
TEST_F(DiskCacheEntryTest, Buffering) {
  InitCache();
  Buffering();
}

TEST_F(DiskCacheEntryTest, BufferingNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  Buffering();
}
// Checks that entries are zero length when created.
void DiskCacheEntryTest::SizeAtCreate() {
  const char key[] = "the first key";
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kNumStreams = 3;
  for (int i = 0; i < kNumStreams; ++i)
    EXPECT_EQ(0, entry->GetDataSize(i));
  entry->Close();
}
TEST_F(DiskCacheEntryTest, SizeAtCreate) {
  InitCache();
  SizeAtCreate();
}

TEST_F(DiskCacheEntryTest, MemoryOnlySizeAtCreate) {
  SetMemoryOnlyMode();
  InitCache();
  SizeAtCreate();
}
// Some extra tests to make sure that buffering works properly when changing
// the entry size.
void DiskCacheEntryTest::SizeChanges(int stream_index) {
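  // Every gap created by writing past the current end of the stream must read
  // back as zeros, which is what the |zeros| comparisons below check.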
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 17000, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 23000, buffer1.get(), kSize, true));
  entry->Close();

  // Extend the file and read between the old size and the new write.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(23000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));

  // Read at the end of the old file size.
  EXPECT_EQ(kSize,
            ReadData(entry, stream_index, 23000 + kSize - 35, buffer2.get(),
                     kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));

  // Read slightly before the last write.
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Extend the entry a little more.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 26000, buffer1.get(), kSize, true));
  EXPECT_EQ(26000 + kSize, entry->GetDataSize(stream_index));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 25900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(28,
            ReadData(entry, stream_index, 25000 + kSize - 28, buffer2.get(),
                     kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));

  // Reduce the size with a buffer that is not extending the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24000, buffer1.get(), kSize, false));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24500, buffer1.get(), kSize, true));
  EXPECT_EQ(24500 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 23900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size below the old size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 19000, buffer1.get(), kSize, true));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 18900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Verify that the actual file is truncated.
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));

  // Extend the newly opened file with a zero length write, expect zero fill.
  EXPECT_EQ(0,
            WriteData(entry, stream_index, 20000 + kSize, buffer1.get(), 0,
                      false));
  EXPECT_EQ(kSize,
            ReadData(entry, stream_index, 19000 + kSize, buffer1.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));

  entry->Close();
}
TEST_F(DiskCacheEntryTest, SizeChanges) {
  InitCache();
  SizeChanges(1);
}

TEST_F(DiskCacheEntryTest, SizeChangesNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  SizeChanges(1);
}
// Write more than the total cache capacity but to a single entry. |size| is
// the number of bytes to write each time.
void DiskCacheEntryTest::ReuseEntry(int size, int stream_index) {
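  // Each iteration truncates the stream and rewrites |size| bytes, so the
  // total amount written greatly exceeds the configured maximum cache size
  // while only two entries ever exist; the first entry must survive.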
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));

  entry->Close();
  std::string key2("the second key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
  CacheTestFillBuffer(buffer->data(), size, false);

  for (int i = 0; i < 15; i++) {
    EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer.get(), 0, true));
    EXPECT_EQ(size,
              WriteData(entry, stream_index, 0, buffer.get(), size, false));
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  }

  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key1, &entry)) << "have not evicted this entry";
  entry->Close();
}
TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}
// Reading somewhere that was not written should return zeros.
void DiskCacheEntryTest::InvalidData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  const int kSize3 = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple data grow:
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 400, buffer1.get(), 200, false));
  EXPECT_EQ(600, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 300, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // The entry is now on disk. Load it and extend it.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 800, buffer1.get(), 200, false));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 700, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // This time using truncate.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 1800, buffer1.get(), 200, true));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 1500, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));

  // Go to an external file.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 19800, buffer1.get(), 200, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(4000, ReadData(entry, stream_index, 14000, buffer3.get(), 4000));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer3.get(), 600));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));

  // Extend it again.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 2000, buffer1.get(), 600, false));
  EXPECT_EQ(2600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 1800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  // And again (with truncation flag).
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 3000, buffer1.get(), 600, true));
  EXPECT_EQ(3600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 2800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  entry->Close();
}
TEST_F(DiskCacheEntryTest, InvalidData) {
  InitCache();
  InvalidData(0);
}

TEST_F(DiskCacheEntryTest, InvalidDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  InvalidData(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
  SetMemoryOnlyMode();
  InitCache();
  InvalidData(0);
}
// Tests that the cache preserves the buffer of an IO operation.
void DiskCacheEntryTest::ReadWriteDestroyBuffer(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  net::TestCompletionCallback cb;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(
                stream_index, 0, buffer.get(), kSize, cb.callback(), false));

  // Release our reference to the buffer.
  buffer = NULL;
  EXPECT_EQ(kSize, cb.WaitForResult());

  // And now test with a Read().
  buffer = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->ReadData(stream_index, 0, buffer.get(), kSize, cb.callback()));
  buffer = NULL;
  EXPECT_EQ(kSize, cb.WaitForResult());

  entry->Close();
}

TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
  InitCache();
  ReadWriteDestroyBuffer(0);
}
void DiskCacheEntryTest::DoomNormalEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Doom();
  entry->Close();

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, true);
  buffer->data()[19999] = '\0';

  key = buffer->data();
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  entry->Doom();
  entry->Close();

  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, DoomEntry) {
  InitCache();
  DoomNormalEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomNormalEntry();
}
// Tests dooming an entry that's linked to an open entry.
void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
  disk_cache::Entry* entry1;
  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("fixed", &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry("foo", &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry("bar", &entry1));
  entry1->Close();

  ASSERT_EQ(net::OK, OpenEntry("foo", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("bar", &entry2));
  entry2->Doom();
  entry2->Close();

  ASSERT_EQ(net::OK, OpenEntry("foo", &entry2));
  entry2->Doom();
  entry2->Close();
  entry1->Close();

  ASSERT_EQ(net::OK, OpenEntry("fixed", &entry1));
  entry1->Close();
}

TEST_F(DiskCacheEntryTest, DoomEntryNextToOpenEntry) {
  InitCache();
  DoomEntryNextToOpenEntry();
}

TEST_F(DiskCacheEntryTest, NewEvictionDoomEntryNextToOpenEntry) {
  SetNewEviction();
  InitCache();
  DoomEntryNextToOpenEntry();
}

TEST_F(DiskCacheEntryTest, AppCacheDoomEntryNextToOpenEntry) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  DoomEntryNextToOpenEntry();
}
// Verify that basic operations work as expected with doomed entries.
void DiskCacheEntryTest::DoomedEntry(int stream_index) {
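  // A doomed entry disappears from the index right away (the entry count drops
  // to zero) but must remain fully usable until the last handle is closed.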
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Doom();

  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
  Time initial = Time::Now();
  AddDelay();

  const int kSize1 = 2000;
  const int kSize2 = 2000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
  EXPECT_EQ(key, entry->GetKey());
  EXPECT_TRUE(initial < entry->GetLastModified());
  EXPECT_TRUE(initial < entry->GetLastUsed());

  entry->Close();
}
TEST_F(DiskCacheEntryTest, DoomedEntry) {
  InitCache();
  DoomedEntry(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomedEntry(0);
}
// Tests that we discard entries if the data is missing.
TEST_F(DiskCacheEntryTest, MissingData) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Write to an external file.
  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  FlushQueueForTest();
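
  // Address 0x80000001 should map to the backend's first external data file;
  // deleting that file simulates an entry whose payload has gone missing.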
  disk_cache::Addr address(0x80000001);
  base::FilePath name = cache_impl_->GetFileName(address);
  EXPECT_TRUE(base::DeleteFile(name, false));

  // Attempt to read the data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
            ReadData(entry, 0, 0, buffer.get(), kSize));
  entry->Close();

  // The entry should be gone.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
}
// Test that child entries in a memory cache backend are not visible from
// enumerations.
TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
  SetMemoryOnlyMode();
  InitCache();
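
  // Sparse data is stored in hidden "child" entries; the write at offset 8192
  // below creates one, and the enumeration must still report only the parent.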
  const int kSize = 4096;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* parent_entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &parent_entry));

  // Writes to the parent entry.
  EXPECT_EQ(kSize,
            parent_entry->WriteSparseData(
                0, buf.get(), kSize, net::CompletionCallback()));

  // This write creates a child entry and writes to it.
  EXPECT_EQ(kSize,
            parent_entry->WriteSparseData(
                8192, buf.get(), kSize, net::CompletionCallback()));

  parent_entry->Close();

  // Perform the enumerations.
  void* iter = NULL;
  disk_cache::Entry* entry = NULL;
  int count = 0;
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(entry != NULL);
    ++count;
    disk_cache::MemEntryImpl* mem_entry =
        reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
    EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
    mem_entry->Close();
  }
  EXPECT_EQ(1, count);
}
// Writes |buf_1| to offset and reads it back as |buf_2|.
void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
                    net::IOBuffer* buf_1, int size, net::IOBuffer* buf_2) {
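  // Nothing has been written at |offset| yet, so the first read must return 0.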
  net::TestCompletionCallback cb;

  memset(buf_2->data(), 0, size);
  int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
  EXPECT_EQ(0, cb.GetResult(ret));

  ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
  EXPECT_EQ(size, cb.GetResult(ret));

  ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
  EXPECT_EQ(size, cb.GetResult(ret));

  EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
}
// Reads |size| bytes from |entry| at |offset| and verifies that they are the
// same as the content of the provided |buffer|.
void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
                           int size) {
  net::TestCompletionCallback cb;

  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(size));
  memset(buf_1->data(), 0, size);
  int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
  EXPECT_EQ(size, cb.GetResult(ret));
  EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
}
void DiskCacheEntryTest::BasicSparseIO() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());

  // Write at offset 0x400000 (4 MB).
  VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());

  // Write at offset 0x800000000 (32 GB).
  VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());

  entry->Close();

  // Check everything again.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
  entry->Close();
}
TEST_F(DiskCacheEntryTest, BasicSparseIO) {
  InitCache();
  BasicSparseIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseIO) {
  SetMemoryOnlyMode();
  InitCache();
  BasicSparseIO();
}
void DiskCacheEntryTest::HugeSparseIO() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Write 1.2 MB so that we cover multiple entries.
  const int kSize = 1200 * 1024;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
  entry->Close();

  // Check it again.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
  entry->Close();
}
TEST_F(DiskCacheEntryTest, HugeSparseIO) {
  InitCache();
  HugeSparseIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseIO) {
  SetMemoryOnlyMode();
  InitCache();
  HugeSparseIO();
}
void DiskCacheEntryTest::GetAvailableRange() {
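  // Two 16 KB chunks are written at 0x20F0000 and 0x20F4400; the queries below
  // probe before, inside, and just past those ranges.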
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));

  // We stop at the first empty block.
  int64 start = 0;
  net::TestCompletionCallback cb;
  int rv = entry->GetAvailableRange(
      0x20F0000, kSize * 2, &start, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(0x20F0000, start);

  start = 0;
  rv = entry->GetAvailableRange(0, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  rv = entry->GetAvailableRange(
      0x20F0000 - kSize, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(0x20F0000, start);

  // We should be able to Read based on the results of GetAvailableRange.
  start = -1;
  rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));

  start = 0;
  rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback());
  EXPECT_EQ(0x2000, cb.GetResult(rv));
  EXPECT_EQ(0x20F2000, start);
  EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize));

  // Make sure that we respect the |len| argument.
  start = 0;
  rv = entry->GetAvailableRange(
      0x20F0001 - kSize, kSize, &start, cb.callback());
  EXPECT_EQ(1, cb.GetResult(rv));
  EXPECT_EQ(0x20F0000, start);

  entry->Close();
}
TEST_F(DiskCacheEntryTest, GetAvailableRange) {
  InitCache();
  GetAvailableRange();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();
  GetAvailableRange();
}
void DiskCacheEntryTest::CouldBeSparse() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));

  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // Now verify a regular entry.
  key.assign("another key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());

  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));

  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();
}
TEST_F(DiskCacheEntryTest, CouldBeSparse) {
  InitCache();
  CouldBeSparse();
}

TEST_F(DiskCacheEntryTest, MemoryCouldBeSparse) {
  SetMemoryOnlyMode();
  InitCache();
  CouldBeSparse();
}
TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // This loop writes back to back starting from offset 0 and 9000.
  for (int i = 0; i < kSize; i += 1024) {
    scoped_refptr<net::WrappedIOBuffer> buf_3(
        new net::WrappedIOBuffer(buf_1->data() + i));
    VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
    VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
  }

  // Make sure we have data written.
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);

  // This tests a large write that spans 3 entries from a misaligned offset.
  VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());

  entry->Close();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  disk_cache::Entry* entry;
  std::string key("the first key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Writes in the middle of an entry.
  EXPECT_EQ(
      1024,
      entry->WriteSparseData(0, buf.get(), 1024, net::CompletionCallback()));
  EXPECT_EQ(
      1024,
      entry->WriteSparseData(5120, buf.get(), 1024, net::CompletionCallback()));
  EXPECT_EQ(1024,
            entry->WriteSparseData(
                10000, buf.get(), 1024, net::CompletionCallback()));

  // Writes in the middle of an entry and spans 2 child entries.
  EXPECT_EQ(8192,
            entry->WriteSparseData(
                50000, buf.get(), 8192, net::CompletionCallback()));

  int64 start;
  net::TestCompletionCallback cb;
  // Test that we stop at a discontinuous child at the second block.
  int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(0, start);

  // Test that number of bytes is reported correctly when we start from the
  // middle of a filled region.
  rv = entry->GetAvailableRange(512, 10000, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(512, start);

  // Test that we found bytes in the child of next block.
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that the desired length is respected. It starts within a filled
  // region.
  rv = entry->GetAvailableRange(5500, 512, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(5500, start);

  // Test that the desired length is respected. It starts before a filled
  // region.
  rv = entry->GetAvailableRange(5000, 620, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that multiple blocks are scanned.
  rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback());
  EXPECT_EQ(8192, cb.GetResult(rv));
  EXPECT_EQ(50000, start);

  entry->Close();
}
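
// Reading the expectations above: for the memory-only backend, sparse data is
// tracked per written range inside child entries, and GetAvailableRange is
// byte-accurate within the requested window. A query starting mid-range (5500)
// is answered exactly, while a query that starts before a filled range
// (5000, len 620) only reports the 500 bytes of that range that fall inside
// the window. This is an observation about the current behavior, not a
// contractual guarantee.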

void DiskCacheEntryTest::UpdateSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
  entry1->Close();

  // Write at offset 2048.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());

  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("the second key", &entry2));

  entry1->Close();
  entry2->Close();
  FlushQueueForTest();
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, UpdateSparseEntry) {
  SetCacheType(net::MEDIA_CACHE);
  InitCache();
  UpdateSparseEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyUpdateSparseEntry) {
  SetMemoryOnlyMode();
  SetCacheType(net::MEDIA_CACHE);
  InitCache();
  UpdateSparseEntry();
}
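
// The entry-count check in UpdateSparseEntry() reflects a backend difference:
// the memory-only and simple backends keep the sparse data inside the two
// named entries (count 2), while the blockfile backend appears to store the
// sparse ranges in a separate child entry, so a third entry shows up
// (count 3).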

void DiskCacheEntryTest::DoomSparseEntry() {
  std::string key1("the first key");
  std::string key2("the second key");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry1));
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
    // Keep the second map under the default size.
    if (i < 9)
      EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));

    offset *= 4;
  }

  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(15, cache_->GetEntryCount());

  // Doom the first entry while it's still open.
  entry1->Doom();
  entry1->Close();

  // Doom the second entry after it's fully saved.
  entry2->Close();
  EXPECT_EQ(net::OK, DoomEntry(key2));

  // Make sure we do all needed work. This may fail for entry2 if between Close
  // and DoomEntry the system decides to remove all traces of the file from the
  // system cache so we don't see that there is pending IO.
  base::MessageLoop::current()->RunUntilIdle();

  if (memory_only_) {
    EXPECT_EQ(0, cache_->GetEntryCount());
  } else {
    if (5 == cache_->GetEntryCount()) {
      // Most likely we are waiting for the result of reading the sparse info
      // (it's always async on Posix so it is easy to miss). Unfortunately we
      // don't have any signal to watch for so we can only wait.
      base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
      base::MessageLoop::current()->RunUntilIdle();
    }
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}

TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
  InitCache();
  DoomSparseEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomSparseEntry();
}

// A CompletionCallback wrapper that deletes the cache from within the
// callback. The way a CompletionCallback works means that all tasks (even new
// ones) are executed by the message loop before returning to the caller, so
// the only way to simulate a race is to execute what we want on the callback.
class SparseTestCompletionCallback : public net::TestCompletionCallback {
 public:
  explicit SparseTestCompletionCallback(scoped_ptr<disk_cache::Backend> cache)
      : cache_(cache.Pass()) {
  }

 private:
  virtual void SetResult(int result) OVERRIDE {
    cache_.reset();
    TestCompletionCallback::SetResult(result);
  }

  scoped_ptr<disk_cache::Backend> cache_;
  DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback);
};

// Tests that we don't crash when the backend is deleted while we are deleting
// the sub-entries of a sparse entry.
TEST_F(DiskCacheEntryTest, DoomSparseEntry2) {
  InitCache();
  std::string key("the key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    offset *= 4;
  }
  EXPECT_EQ(9, cache_->GetEntryCount());

  entry->Close();
  disk_cache::Backend* cache = cache_.get();
  SparseTestCompletionCallback cb(cache_.Pass());
  int rv = cache->DoomEntry(key, cb.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);
  EXPECT_EQ(net::OK, cb.WaitForResult());
}
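
// SparseTestCompletionCallback takes ownership of the backend, so by the time
// WaitForResult() returns, the cache has already been destroyed from inside
// SetResult(); the test only verifies that dooming the sparse entry does not
// crash in that situation.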

void DiskCacheEntryTest::PartialSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // We should be able to deal with IO that is not aligned to the block size
  // of a sparse entry, at least to write a big range without leaving holes.
  const int kSize = 4 * 1024;
  const int kSmallSize = 128;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // The first write is just to extend the entry. The third write occupies
  // a 1KB block partially, it may not be written internally depending on the
  // implementation.
  EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
  EXPECT_EQ(kSmallSize,
            WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buf2(new net::IOBuffer(kSize));
  memset(buf2->data(), 0, kSize);
  EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));

  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));

  // This read should not change anything.
  EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));

  int rv;
  int64 start;
  net::TestCompletionCallback cb;
  if (memory_only_ || simple_cache_mode_) {
    rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
    EXPECT_EQ(100, cb.GetResult(rv));
    EXPECT_EQ(500, start);
  } else {
    rv = entry->GetAvailableRange(0, 2048, &start, cb.callback());
    EXPECT_EQ(1024, cb.GetResult(rv));
    EXPECT_EQ(1024, start);
  }
  rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(kSize, start);
  rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
  EXPECT_EQ(3616, cb.GetResult(rv));
  EXPECT_EQ(20 * 1024, start);

  // 1. Query before a filled 1KB block.
  // 2. Query within a filled 1KB block.
  // 3. Query beyond a filled 1KB block.
  if (memory_only_ || simple_cache_mode_) {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3496, cb.GetResult(rv));
    EXPECT_EQ(20000, start);
  } else {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3016, cb.GetResult(rv));
    EXPECT_EQ(20480, start);
  }
  rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback());
  EXPECT_EQ(1523, cb.GetResult(rv));
  EXPECT_EQ(3073, start);
  rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  EXPECT_EQ(4600, start);

  // Now make another write and verify that there is no hole in between.
  EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv));
  EXPECT_EQ(1024, start);
  EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));

  entry->Close();
}
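
// The if/else branches above capture the same backend difference seen earlier:
// the memory-only and simple backends answer GetAvailableRange with exact byte
// ranges, while the blockfile backend rounds to its internal 1 KB blocks, so
// the reported start offsets and lengths differ between the two branches.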

TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
  InitCache();
  PartialSparseEntry();
}

TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  PartialSparseEntry();
}

// Tests that corrupt sparse children are removed automatically.
TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  const int k1Meg = 1024 * 1024;
  EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  entry->Close();
  EXPECT_EQ(4, cache_->GetEntryCount());

  void* iter = NULL;
  int count = 0;
  std::string child_key[2];
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(entry != NULL);
    // Writing to an entry will alter the LRU list and invalidate the iterator.
    if (entry->GetKey() != key && count < 2)
      child_key[count++] = entry->GetKey();
    entry->Close();
  }
  for (int i = 0; i < 2; i++) {
    ASSERT_EQ(net::OK, OpenEntry(child_key[i], &entry));
    // Overwrite the header's magic and signature.
    EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
    entry->Close();
  }

  EXPECT_EQ(4, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Two children should be gone. One while reading and one while writing.
  EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));

  // We never touched this one.
  EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
  entry->Close();

  // We re-created one of the corrupt children.
  EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, CancelSparseIO) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 40 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // This will open and write two "real" entries.
  net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
  int rv = entry->WriteSparseData(
      1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);

  int64 offset = 0;
  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  rv = cb5.GetResult(rv);
  if (!cb1.have_result()) {
    // We may or may not have finished writing to the entry. If we have not,
    // we cannot start another operation at this time.
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
  }

  // We cancel the pending operation, and register multiple notifications.
  entry->CancelSparseIO();
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb2.callback()));
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb3.callback()));
  entry->CancelSparseIO();  // Should be a no op at this point.
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb4.callback()));

  if (!cb1.have_result()) {
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->ReadSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
  }

  // Now see if we receive all notifications. Note that we should not be able
  // to write everything (unless the timing of the system is really weird).
  rv = cb1.WaitForResult();
  EXPECT_TRUE(rv == 4096 || rv == kSize);
  EXPECT_EQ(net::OK, cb2.WaitForResult());
  EXPECT_EQ(net::OK, cb3.WaitForResult());
  EXPECT_EQ(net::OK, cb4.WaitForResult());

  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  EXPECT_EQ(0, cb5.GetResult(rv));
  entry->Close();
}
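
// As exercised above, CancelSparseIO() does not fail the in-flight write; it
// only asks the entry to stop early (the write result is either the 4096 bytes
// already issued or the full kSize). ReadyForSparseIO() returns ERR_IO_PENDING
// and runs its callback with OK once the cancellation has been processed,
// after which new sparse operations are accepted again.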

// Tests that we perform sanity checks on an entry's key. Note that there are
// other tests that exercise sanity checks by using saved corrupt files.
TEST_F(DiskCacheEntryTest, KeySanityCheck) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // We have reserved space for a short key (one block), let's say that the key
  // takes more than one block, and remove the NULLs after the actual key.
  store->key_len = 800;
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}

// The Simple Cache backend requires a few guarantees from the filesystem like
// atomic renaming of recently open files. Those guarantees are not provided in
// general on Windows.
#if defined(OS_POSIX)

TEST_F(DiskCacheEntryTest, SimpleCacheInternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  InternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheExternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  ExternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheReleaseBuffer) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReleaseBuffer(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheStreamAccess) {
  SetSimpleCacheMode();
  InitCache();
  StreamAccess();
}

TEST_F(DiskCacheEntryTest, SimpleCacheGetKey) {
  SetSimpleCacheMode();
  InitCache();
  GetKey();
}

TEST_F(DiskCacheEntryTest, SimpleCacheGetTimes) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    GetTimes(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheGrowData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    GrowData(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheTruncateData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    TruncateData(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheZeroLengthIO) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ZeroLengthIO(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheSizeAtCreate) {
  SetSimpleCacheMode();
  InitCache();
  SizeAtCreate();
}

TEST_F(DiskCacheEntryTest, SimpleCacheReuseExternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(200 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReuseEntry(20 * 1024, i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheReuseInternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(100 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReuseEntry(10 * 1024, i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheSizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    SizeChanges(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheInvalidData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    InvalidData(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheReadWriteDestroyBuffer) {
  // Proving that the test works well with optimistic operations enabled is
  // subtle; instead, run only in APP_CACHE mode to disable optimistic
  // operations. Stream 0 always uses optimistic operations, so the test is not
  // run on stream 0.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  for (int i = 1; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReadWriteDestroyBuffer(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomNormalEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntryNextToOpenEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomEntryNextToOpenEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomedEntry) {
  SetSimpleCacheMode();
  InitCache();
  // Stream 2 is excluded because the implementation does not support writing
  // to it on a doomed entry, if it was previously lazily omitted.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount - 1; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    DoomedEntry(i);
  }
}

// Creates an entry with corrupted last byte in stream 0.
// Requires SimpleCacheMode.
bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const std::string& key,
                                                         int* data_size) {
  disk_cache::Entry* entry = NULL;

  if (CreateEntry(key, &entry) != net::OK || !entry) {
    LOG(ERROR) << "Could not create entry";
    return false;
  }

  const char data[] = "this is very good data";
  const int kDataSize = arraysize(data);
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
  base::strlcpy(buffer->data(), data, kDataSize);

  EXPECT_EQ(kDataSize, WriteData(entry, 1, 0, buffer.get(), kDataSize, false));
  entry->Close();

  // Corrupt the last byte of the data.
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN;
  base::PlatformFile entry_file0 =
      base::CreatePlatformFile(entry_file0_path, flags, NULL, NULL);
  if (entry_file0 == base::kInvalidPlatformFileValue)
    return false;

  int64 file_offset =
      sizeof(disk_cache::SimpleFileHeader) + key.size() + kDataSize - 2;
  EXPECT_EQ(1, base::WritePlatformFile(entry_file0, file_offset, "X", 1));
  if (!base::ClosePlatformFile(entry_file0))
    return false;

  *data_size = kDataSize;
  return true;
}
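
// How the corruption offset above is computed: the write went to stream 1,
// and the offset arithmetic assumes that this data begins in file 0 right
// after the file header and the key, so the last character before the
// trailing NUL sits at sizeof(SimpleFileHeader) + key.size() + kDataSize - 2
// (kDataSize includes the NUL from arraysize). Overwriting that byte
// invalidates the stored checksum without touching the entry's metadata.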

// Tests that the simple cache can detect entries that have bad data.
TEST_F(DiskCacheEntryTest, SimpleCacheBadChecksum) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));

  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
}

// Tests that an entry that has had an IO error occur can still be Doomed().
TEST_F(DiskCacheEntryTest, SimpleCacheErrorThenDoom) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));

  disk_cache::Entry* entry = NULL;

  // Open the entry, forcing an IO error.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));

  entry->Doom();  // Should not crash.
}

bool TruncatePath(const base::FilePath& file_path, int64 length) {
  const int flags = base::PLATFORM_FILE_WRITE | base::PLATFORM_FILE_OPEN;
  base::PlatformFile file =
      base::CreatePlatformFile(file_path, flags, NULL, NULL);
  if (base::kInvalidPlatformFileValue == file)
    return false;
  const bool result = base::TruncatePlatformFile(file, length);
  base::ClosePlatformFile(file);
  return result;
}

TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  EXPECT_NE(null, entry);
  entry->Close();

  // Force the entry to flush to disk, so subsequent platform file operations
  // succeed.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  entry->Close();

  // Truncate the file such that the length isn't sufficient to have an EOF
  // record.
  int kTruncationBytes = -implicit_cast<int>(sizeof(disk_cache::SimpleFileEOF));
  const base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  const int64 invalid_size =
      disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
                                                             kTruncationBytes);
  EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
  EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}
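
// The invalid size is computed by passing a negative data size
// (-sizeof(SimpleFileEOF)) to GetFileSizeFromKeyAndDataSize(), i.e. the file
// is cut exactly one EOF record short of a valid empty entry; opening such a
// file is expected to fail with ERR_FAILED rather than crash.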

TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsBasic) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = NULL;

  disk_cache::Entry* entry = NULL;
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  EXPECT_EQ(
      write_buffer->size(),
      WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));

  scoped_refptr<net::IOBufferWithSize> read_buffer(
      new net::IOBufferWithSize(kBufferSize));
  EXPECT_EQ(read_buffer->size(),
            ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
}

TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsDontBlock) {
  // Test sequence:
  // Create, Write, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = NULL;

  MessageLoopHelper helper;
  CallbackTest create_callback(&helper, false);

  int expected_callback_runs = 0;
  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));

  disk_cache::Entry* entry = NULL;
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  int ret = entry->WriteData(
      1,
      0,
      write_buffer.get(),
      write_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  ASSERT_EQ(net::ERR_IO_PENDING, ret);
  helper.WaitUntilCacheIoFinished(++expected_callback_runs);
}

TEST_F(DiskCacheEntryTest,
       SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = NULL;
  MessageLoopHelper helper;

  disk_cache::Entry* entry = NULL;
  // Note that |entry| is only set once CreateEntry() completed which is why we
  // have to wait (i.e. use the helper CreateEntry() function).
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  int ret = entry->WriteData(
      1,
      0,
      write_buffer.get(),
      write_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  EXPECT_EQ(net::ERR_IO_PENDING, ret);
  int expected_callback_runs = 1;

  scoped_refptr<net::IOBufferWithSize> read_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CallbackTest read_callback(&helper, false);
  ret = entry->ReadData(
      1,
      0,
      read_buffer.get(),
      read_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&read_callback)));
  EXPECT_EQ(net::ERR_IO_PENDING, ret);
  ++expected_callback_runs;

  helper.WaitUntilCacheIoFinished(expected_callback_runs);
  ASSERT_EQ(read_buffer->size(), write_buffer->size());
  EXPECT_EQ(
      0,
      memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
}

TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
  // Test sequence:
  // Create, Write, Read, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);

  int expected = 0;
  const int kSize1 = 10;
  const int kSize2 = 20;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);

  disk_cache::Entry* entry = NULL;
  // Create is optimistic, must return OK.
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry,
                                base::Bind(&CallbackTest::Run,
                                           base::Unretained(&callback1))));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  // This write may or may not be optimistic (it depends if the previous
  // optimistic create already finished by the time we call the write here).
  int ret = entry->WriteData(
      1,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
      false);
  EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(
                1,
                0,
                buffer1_read.get(),
                kSize1,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
  expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));

  // At this point after waiting, the pending operations queue on the entry
  // should be empty, so the next Write operation must run as optimistic.
  EXPECT_EQ(kSize2,
            entry->WriteData(
                1,
                0,
                buffer2.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
                false));

  // Let's do another read so we block until both the write and the read
  // operation finishes and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(
                1,
                0,
                buffer2_read.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
  expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
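
// Summary of the optimistic behavior exercised above: Create returns OK
// immediately and queues the real work; a Write either completes synchronously
// (optimistically) or returns ERR_IO_PENDING depending on whether earlier
// queued work has drained; Reads are never optimistic and always return
// ERR_IO_PENDING. The HasOneRef() check at the end verifies that the backend
// dropped its extra references once the queue emptied.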

TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
  // Test sequence:
  // Create, Open, Close, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry,
                                base::Bind(&CallbackTest::Run,
                                           base::Unretained(&callback1))));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache_->OpenEntry(key, &entry2,
                              base::Bind(&CallbackTest::Run,
                                         base::Unretained(&callback2))));
  ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));

  EXPECT_NE(null, entry2);
  EXPECT_EQ(entry, entry2);

  // We have to call close twice, since we called create and open above.
  entry->Close();

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}

TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
  // Test sequence:
  // Create, Close, Open, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  entry->Close();

  net::TestCompletionCallback cb;
  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache_->OpenEntry(key, &entry2, cb.callback()));
  ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
  ScopedEntryPtr entry_closer(entry2);

  EXPECT_NE(null, entry2);
  EXPECT_EQ(entry, entry2);

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
}

TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
  // Test sequence:
  // Create, Close, Write, Open, Open, Close, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  entry->Close();

  // Let's do a Write so we block until both the Close and the Write
  // operation finish. Write must fail since we are writing to a closed entry.
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::MessageLoop::current()->RunUntilIdle();

  // At this point the |entry| must have been destroyed, and called
  // RemoveSelfFromBackend().
  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache_->OpenEntry(key, &entry2, cb.callback()));
  ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_NE(null, entry2);

  disk_cache::Entry* entry3 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache_->OpenEntry(key, &entry3, cb.callback()));
  ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_NE(null, entry3);
  EXPECT_EQ(entry2, entry3);
  entry3->Close();

  // The previous Close doesn't actually close the entry since we opened it
  // twice, so the next Write operation must succeed and it must be able to
  // perform it optimistically, since there is no operation running on this
  // entry.
  EXPECT_EQ(kSize1,
            entry2->WriteData(
                1, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));

  // Let's do another read so we block until both the write and the read
  // operation finishes and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
  entry2->Close();
}

TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic5) {
  // Test sequence:
  // Create, Doom, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);
  entry->Doom();

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}

TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
  // Test sequence:
  // Create, Write, Doom, Doom, Read, Doom, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  entry->Doom();
  entry->Doom();

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));

  entry->Doom();
}

// Confirm that IO buffers are not referenced by the Simple Cache after a write
// completes.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReleases) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = NULL;

  // First, an optimistic create.
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);

  const int kWriteSize = 512;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kWriteSize));
  EXPECT_TRUE(buffer1->HasOneRef());
  CacheTestFillBuffer(buffer1->data(), kWriteSize, false);

  // An optimistic write happens only when there is an empty queue of pending
  // operations. To ensure the queue is empty, we issue a write and wait until
  // it completes.
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
  EXPECT_TRUE(buffer1->HasOneRef());

  // Finally, we should perform an optimistic write and confirm that all
  // references to the IO buffer have been released.
  EXPECT_EQ(
      kWriteSize,
      entry->WriteData(
          1, 0, buffer1.get(), kWriteSize, net::CompletionCallback(), false));
  EXPECT_TRUE(buffer1->HasOneRef());
}

TEST_F(DiskCacheEntryTest, SimpleCacheCreateDoomRace) {
  // Test sequence:
  // Create, Doom, Write, Close, Check files are not on disk anymore.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);

  EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomEntry(key, cb.callback()));
  EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));

  EXPECT_EQ(
      kSize1,
      entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));

  entry->Close();

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::MessageLoop::current()->RunUntilIdle();

  for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
    base::FilePath entry_file_path = cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
    base::File::Info info;
    EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateRace) {
  // This test runs as APP_CACHE to make operations more synchronous. Test
  // sequence:
  // Create, Doom, Create.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback create_callback;

  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK,
            create_callback.GetResult(
                cache_->CreateEntry(key, &entry1, create_callback.callback())));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);

  net::TestCompletionCallback doom_callback;
  EXPECT_EQ(net::ERR_IO_PENDING,
            cache_->DoomEntry(key, doom_callback.callback()));

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK,
            create_callback.GetResult(
                cache_->CreateEntry(key, &entry2, create_callback.callback())));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_EQ(net::OK, doom_callback.GetResult(net::ERR_IO_PENDING));
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomDoom) {
  // Test sequence:
  // Create, Doom, Create, Doom (1st entry), Open.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;

  const char key[] = "the first key";

  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);

  EXPECT_EQ(net::OK, DoomEntry(key));

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);

  // Redundantly dooming entry1 should not delete entry2.
  disk_cache::SimpleEntryImpl* simple_entry1 =
      static_cast<disk_cache::SimpleEntryImpl*>(entry1);
  net::TestCompletionCallback cb;
  EXPECT_EQ(net::OK,
            cb.GetResult(simple_entry1->DoomEntry(cb.callback())));

  disk_cache::Entry* entry3 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateDoom) {
  // Test sequence:
  // Create, Doom, Create, Doom.
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* null = NULL;

  const char key[] = "the first key";

  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);

  EXPECT_EQ(net::OK, DoomEntry(key));

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);

  EXPECT_EQ(net::OK, DoomEntry(key));

  // This test passes if it doesn't crash.
}

// Checks that an optimistic Create would fail later on a racing Open.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticCreateFailsOnOpen) {
  SetSimpleCacheMode();
  InitCache();

  // Create a corrupt file in place of a future entry. Optimistic create should
  // initially succeed, but realize later that creation failed.
  const std::string key = "the key";
  net::TestCompletionCallback cb;
  disk_cache::Entry* entry = NULL;
  disk_cache::Entry* entry2 = NULL;

  EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
      key, cache_path_));
  EXPECT_EQ(net::OK, cache_->CreateEntry(key, &entry, cb.callback()));
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);
  ASSERT_NE(net::OK, OpenEntry(key, &entry2));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());

  DisableIntegrityCheck();
}

// Tests that old entries are evicted while new entries remain in the index.
// This test relies on non-mandatory properties of the simple Cache Backend:
// LRU eviction, specific values of high-watermark and low-watermark etc.
// When changing the eviction algorithm, the test will have to be re-engineered.
TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
  const int kMaxSize = 200 * 1024;
  const int kWriteSize = kMaxSize / 10;
  const int kNumExtraEntries = 12;
  SetSimpleCacheMode();
  SetMaxSize(kMaxSize);
  InitCache();

  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kWriteSize));
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  entry->Close();

  std::string key2("the key prefix");
  for (int i = 0; i < kNumExtraEntries; i++) {
    ASSERT_EQ(net::OK, CreateEntry(key2 + base::StringPrintf("%d", i), &entry));
    ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }

  // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
  // the internal knowledge about |SimpleBackendImpl|.
  ASSERT_NE(net::OK, OpenEntry(key1, &entry))
      << "Should have evicted the old entry";
  for (int i = 0; i < 2; i++) {
    int entry_no = kNumExtraEntries - i - 1;
    // Generally there is no guarantee that at this point the background
    // eviction is finished. We are testing the positive case (the eviction
    // never reaches this entry), which should be non-flaky.
    ASSERT_EQ(net::OK, OpenEntry(key2 + base::StringPrintf("%d", entry_no),
                                 &entry))
        << "Should not have evicted fresh entry " << entry_no;
    entry->Close();
  }
}

// Tests that if a read and a following in-flight truncate are both in progress
// simultaneously, they can both complete successfully. See
// http://crbug.com/239223
TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  EXPECT_EQ(kBufferSize,
            WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  MessageLoopHelper helper;
  int expected = 0;

  // Make a short read.
  const int kReadBufferSize = 512;
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1,
                            0,
                            read_buffer.get(),
                            kReadBufferSize,
                            base::Bind(&CallbackTest::Run,
                                       base::Unretained(&read_callback))));
  ++expected;

  // Truncate the entry to the length of that read.
  scoped_refptr<net::IOBuffer>
      truncate_buffer(new net::IOBuffer(kReadBufferSize));
  CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
  CallbackTest truncate_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1,
                             0,
                             truncate_buffer.get(),
                             kReadBufferSize,
                             base::Bind(&CallbackTest::Run,
                                        base::Unretained(&truncate_callback)),
                             true));
  ++expected;

  // Wait for both the read and truncation to finish, and confirm that both
  // succeeded.
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kReadBufferSize, read_callback.last_result());
  EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
  EXPECT_EQ(0,
            memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
}

// Tests that if a write and a read dependent on it are both in flight
// simultaneously, they can both complete successfully without erroneous
// early returns. See http://crbug.com/239223
TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  MessageLoopHelper helper;
  int expected = 0;

  CallbackTest write_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1,
                             0,
                             write_buffer.get(),
                             kBufferSize,
                             base::Bind(&CallbackTest::Run,
                                        base::Unretained(&write_callback)),
                             false));
  ++expected;

  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1,
                            0,
                            read_buffer.get(),
                            kBufferSize,
                            base::Bind(&CallbackTest::Run,
                                       base::Unretained(&read_callback))));
  ++expected;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kBufferSize, write_callback.last_result());
  EXPECT_EQ(kBufferSize, read_callback.last_result());
  EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
}

TEST_F(DiskCacheEntryTest, SimpleCacheOpenCreateRaceWithNoIndex) {
  SetSimpleCacheMode();
  DisableSimpleCacheWaitForIndex();
  DisableIntegrityCheck();
  InitCache();

  // Assume the index is not initialized, which is likely, since we are blocking
  // the IO thread from executing the index finalization step.
  disk_cache::Entry* entry1;
  net::TestCompletionCallback cb1;
  disk_cache::Entry* entry2;
  net::TestCompletionCallback cb2;
  int rv1 = cache_->OpenEntry("key", &entry1, cb1.callback());
  int rv2 = cache_->CreateEntry("key", &entry2, cb2.callback());

  EXPECT_EQ(net::ERR_FAILED, cb1.GetResult(rv1));
  ASSERT_EQ(net::OK, cb2.GetResult(rv2));
  entry2->Close();
}
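
// With the index blocked from loading, the backend cannot confirm whether the
// key exists, so the Open above is expected to fail with ERR_FAILED while the
// racing Create still succeeds; DisableIntegrityCheck() is presumably what
// keeps teardown from flagging this deliberately odd state.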

// Checks that reading two entries simultaneously does not discard a CRC check.
// TODO(pasko): make it work with Simple Cache.
TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheMultipleReadersCheckCRC) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "key";

  int size;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));

  scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
  scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));

  // Advance the first reader a little.
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(1, ReadData(entry, 0, 0, read_buffer1.get(), 1));

  // Make the second reader pass the point where the first one is, and close.
  disk_cache::Entry* entry2 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
  EXPECT_EQ(1, ReadData(entry2, 0, 0, read_buffer2.get(), 1));
  EXPECT_EQ(1, ReadData(entry2, 0, 1, read_buffer2.get(), 1));
  entry2->Close();

  // Reading the data till the end should produce an error.
  EXPECT_GT(0, ReadData(entry, 0, 1, read_buffer1.get(), size));
  entry->Close();
  DisableIntegrityCheck();
}

// Checking one more scenario of overlapped reading of a bad entry.
// Differs from the |SimpleCacheMultipleReadersCheckCRC| only by the order of
// operations.
TEST_F(DiskCacheEntryTest, SimpleCacheMultipleReadersCheckCRC2) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "key";
  int size;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));

  scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
  scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));

  // Advance the first reader a little.
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);
  EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));

  // Advance the 2nd reader by the same amount.
  disk_cache::Entry* entry2 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));

  // Continue reading 1st.
  EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));

  // This read should fail as well because we have previous read failures.
  EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
  DisableIntegrityCheck();
}

// Test if we can sequentially read each subset of the data until all the data
// is read, then the CRC is calculated correctly and the reads are successful.
TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRC) {
  // Test sequence:
  // Create, Write, Read (first half of data), Read (second half of data),
  // Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  EXPECT_EQ(entry, entry2);

  // Read the first half of the data.
  int offset = 0;
  int buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));

  // Read the second half of the data.
  offset = buf_len;
  buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read2(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
  char* buffer1_data = buffer1->data() + offset;
  EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  entry2->Close();
}

// Test if we can write the data not in sequence and read correctly. In
// this case the CRC will not be present.
TEST_F(DiskCacheEntryTest, SimpleCacheNonSequentialWrite) {
  // Test sequence:
  // Create, Write (second half of data), Write (first half of data), Read,
  // Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  char* buffer1_data = buffer1->data() + kHalfSize;
  memcpy(buffer2->data(), buffer1_data, kHalfSize);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Close();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_NE(null, entry);

    int offset = kHalfSize;
    int buf_len = kHalfSize;

    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    offset = 0;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    entry->Close();

    ASSERT_EQ(net::OK, OpenEntry(key, &entry));

    scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));
    // Check that we are not leaking.
    ASSERT_NE(entry, null);
    EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
    entry->Close();
  }
}
// Test that changing stream1 size does not affect stream0 (stream0 and stream1
// are stored in the same file in Simple Cache).
TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry = NULL;
  const char key[] = "the key";
  const int kSize = 100;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer_read(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Write something into stream0.
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();

  // Extend stream1 with a zero-length write at a non-zero offset.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  int stream1_size = 100;
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();

  // Check that stream0 data has not been modified and that the EOF record for
  // stream 0 contains a crc.
  // The entry needs to be reopened before checking the crc: Open will perform
  // the synchronization with the previous Close. This ensures the EOF records
  // have been written to disk before we attempt to read them independently.
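  // (GetEOFOffsetInFile() for stream 0 needs both stream sizes, which is why
  // |data_size| below includes stream1_size as well as kSize: the stream 0
  // EOF record moves within the shared file when stream 1 changes size.)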
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  int flags = base::PLATFORM_FILE_READ | base::PLATFORM_FILE_OPEN;
  base::PlatformFile entry_file0 =
      base::CreatePlatformFile(entry_file0_path, flags, NULL, NULL);
  ASSERT_TRUE(entry_file0 != base::kInvalidPlatformFileValue);

  int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
  int sparse_data_size = 0;
  disk_cache::SimpleEntryStat entry_stat(
      base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
  int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
  disk_cache::SimpleFileEOF eof_record;
  ASSERT_EQ(static_cast<int>(sizeof(eof_record)), base::ReadPlatformFile(
      entry_file0,
      eof_offset,
      reinterpret_cast<char*>(&eof_record),
      sizeof(eof_record)));
  EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number);
  EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) ==
              disk_cache::SimpleFileEOF::FLAG_HAS_CRC32);

  buffer_read = new net::IOBuffer(kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));

  // Change stream1 size again, this time with a truncating write.
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();

  // Check that stream0 data has not been modified.
  buffer_read = new net::IOBuffer(kSize);
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();
}
// Test that writing within the range for which the crc has already been
// computed will properly invalidate the computed crc.
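// (The running CRC only describes the contiguous range written so far; once
// part of that range is overwritten, the cached value no longer matches the
// bytes on disk, so the entry must recompute or drop it instead of emitting a
// stale checksum at Close.)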
TEST_F(DiskCacheEntryTest, SimpleCacheCRCRewrite) {
  // Create, Write (big data), Write (small data in the middle), Close.
  // Open, Read (all), Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kHalfSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kHalfSize, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);
  entry->Close();

  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    int offset = 0;
    int buf_len = kSize;

    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
    offset = kHalfSize;
    buf_len = kHalfSize;
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    ASSERT_EQ(net::OK, OpenEntry(key, &entry));

    scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
    EXPECT_EQ(0, memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize,
                        kHalfSize));

    entry->Close();
  }
}
bool DiskCacheEntryTest::SimpleCacheThirdStreamFileExists(const char* key) {
  int third_stream_file_index =
      disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
  base::FilePath third_stream_file_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
          key, third_stream_file_index));
  return PathExists(third_stream_file_path);
}
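
// Dooms |key| and waits for the doom callback, so that callers can examine
// the on-disk cache files immediately afterwards without racing the backend.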
void DiskCacheEntryTest::SyncDoomEntry(const char* key) {
  net::TestCompletionCallback callback;
  cache_->DoomEntry(key, callback.callback());
  callback.WaitForResult();
}
// Check that a newly-created entry with no third-stream writes omits the
// third stream file.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream1) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "key";

  disk_cache::Entry* entry;

  // Create entry and close without writing: third stream file should be
  // omitted, since the stream is empty.
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));

  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
// Check that a newly-created entry with only a single zero-offset, zero-length
// write omits the third stream file.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream2) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, write empty buffer to third stream, and close: third stream
  // should still be omitted, since the entry ignores writes that don't modify
  // data or change the length.
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(0, WriteData(entry, 2, 0, buffer, 0, true));
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));

  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
// Check that we can read back data written to the third stream.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream3) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, write data to third stream, and close: third stream should
  // not be omitted, since it contains data. Re-open entry and ensure there
  // are that many bytes in the third stream.
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2, kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));

  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
// Check that we remove the third stream file upon opening an entry and finding
// the third stream empty. (This is the upgrade path for entries written
// before the third stream was optional.)
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, write data to third stream, truncate third stream back to
  // empty, and close: third stream will not initially be omitted, since entry
  // creates the file when the first significant write comes in, and only
  // removes it on open if it is empty. Reopen, ensure that the file is
  // deleted, and that there's no data in the third stream.
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1, kHalfSize, true));
  EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1, 0, true));
  entry->Close();
  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
  EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2, kSize));
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));

  SyncDoomEntry(key);
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
// Check that we don't accidentally create the third stream file once the entry
// has been doomed.
TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
  SetSimpleCacheMode();
  InitCache();

  const int kHalfSize = 8;
  const int kSize = kHalfSize * 2;
  const char key[] = "key";
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kHalfSize, false);

  disk_cache::Entry* entry;

  // Create entry, doom entry, write data to third stream, and close: third
  // stream should not exist. (Note: We don't care if the write fails, just
  // that it doesn't cause the file to be created on disk.)
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Doom();
  WriteData(entry, 2, 0, buffer, kHalfSize, true);
  entry->Close();
  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
}
// There could be a race between Doom and an optimistic write.
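// (In the Simple Cache, Create and the first Write on a freshly created entry
// may complete "optimistically", returning success before the backend has
// finished the corresponding disk work, so a Doom issued at about the same
// time can interleave with those still-pending operations.)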
TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
  // Create, first Write, second Write, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kSize, false);

  // The race only happens on stream 1 and stream 2.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_EQ(net::OK, DoomAllEntries());
    disk_cache::Entry* entry = NULL;

    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);
    entry->Close();
    entry = NULL;

    ASSERT_EQ(net::OK, DoomAllEntries());
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);

    int offset = 0;
    int buf_len = kSize;
    // This write should not be optimistic (since create is).
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));

    // This write should be optimistic.
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_NE(null, entry);
    entry->Close();
  }
}
TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  BasicSparseIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  HugeSparseIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
  SetSimpleCacheMode();
  InitCache();
  GetAvailableRange();
}

TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheCouldBeSparse) {
  SetSimpleCacheMode();
  InitCache();
  CouldBeSparse();
}

TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  UpdateSparseEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomSparseEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  PartialSparseEntry();
}
3928 const int kSize
= 1024;
3930 SetSimpleCacheMode();
3931 // An entry is allowed sparse data 1/10 the size of the cache, so this size
3932 // allows for one |kSize|-sized range plus overhead, but not two ranges.
3933 SetMaxSize(kSize
* 15);
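  // With kSize = 1024 and a maximum cache size of 15 * 1024 = 15360 bytes,
  // the per-entry sparse allowance of 1/10 of the cache is 1536 bytes: room
  // for one 1024-byte range plus bookkeeping, but not for two.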
  InitCache();

  const char key[] = "key";
  disk_cache::Entry* null = NULL;
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  net::TestCompletionCallback callback;
  int ret;

  // Verify initial conditions.
  ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  // Write a range and make sure it reads back.
  ret = entry->WriteSparseData(0, buffer, kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  // Write another range and make sure it reads back.
  ret = entry->WriteSparseData(kSize, buffer, kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  ret = entry->ReadSparseData(kSize, buffer, kSize, callback.callback());
  EXPECT_EQ(kSize, callback.GetResult(ret));

  // Make sure the first range was removed when the second was written.
  ret = entry->ReadSparseData(0, buffer, kSize, callback.callback());
  EXPECT_EQ(0, callback.GetResult(ret));

  entry->Close();
}

#endif  // defined(OS_POSIX)