1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
7 #include "base/bind_helpers.h"
8 #include "base/file_util.h"
9 #include "base/strings/string_util.h"
10 #include "base/strings/stringprintf.h"
11 #include "base/threading/platform_thread.h"
12 #include "base/timer/timer.h"
13 #include "net/base/completion_callback.h"
14 #include "net/base/io_buffer.h"
15 #include "net/base/net_errors.h"
16 #include "net/base/test_completion_callback.h"
17 #include "net/disk_cache/backend_impl.h"
18 #include "net/disk_cache/disk_cache_test_base.h"
19 #include "net/disk_cache/disk_cache_test_util.h"
20 #include "net/disk_cache/entry_impl.h"
21 #include "net/disk_cache/mem_entry_impl.h"
22 #include "net/disk_cache/simple/simple_entry_format.h"
23 #include "net/disk_cache/simple/simple_entry_impl.h"
24 #include "net/disk_cache/simple/simple_test_util.h"
25 #include "net/disk_cache/simple/simple_util.h"
26 #include "testing/gtest/include/gtest/gtest.h"
29 using disk_cache::ScopedEntryPtr
;
31 // Tests that can run with different types of caches.
32 class DiskCacheEntryTest
: public DiskCacheTestWithCache
{
// NOTE(review): this chunk is a garbled extraction -- original physical
// lines are split mid-statement and several lines are elided (the fused
// source-line numbers jump 32, 34, 35, 38..., and the closing "};" plus any
// access specifiers are missing). Code below is byte-identical; only
// comments were added. Verify against upstream entry_unittest.cc.
// The helpers declared here are the shared bodies of the per-cache-type
// tests; each TEST_F below configures a cache flavor and calls one of them.
// Runs on the cache (background) thread -- see InternalSyncIO().
34 void InternalSyncIOBackground(disk_cache::Entry
* entry
);
// Background-thread body for the external-file synchronous IO test.
35 void ExternalSyncIOBackground(disk_cache::Entry
* entry
);
38 void InternalSyncIO();
39 void InternalAsyncIO();
40 void ExternalSyncIO();
41 void ExternalAsyncIO();
// |size| presumably selects the payload written/reused -- definition not
// visible in this chunk; confirm upstream.
52 void ReuseEntry(int size
);
54 void ReadWriteDestroyBuffer();
55 void DoomNormalEntry();
56 void DoomEntryNextToOpenEntry();
60 void GetAvailableRange();
62 void UpdateSparseEntry();
63 void DoomSparseEntry();
64 void PartialSparseEntry();
// Presumably corrupts the checksum of a simple-cache entry for |key| and
// reports its data size via |data_size| -- TODO(review): confirm; the
// definition is outside this chunk.
65 bool SimpleCacheMakeBadChecksumEntry(const char* key
, int* data_size
);
68 // This part of the test runs on the background thread.
69 void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry
* entry
) {
70 const int kSize1
= 10;
71 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
72 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
75 entry
->ReadData(0, 0, buffer1
.get(), kSize1
, net::CompletionCallback()));
76 base::strlcpy(buffer1
->data(), "the data", kSize1
);
79 0, 0, buffer1
.get(), kSize1
, net::CompletionCallback(), false));
80 memset(buffer1
->data(), 0, kSize1
);
83 entry
->ReadData(0, 0, buffer1
.get(), kSize1
, net::CompletionCallback()));
84 EXPECT_STREQ("the data", buffer1
->data());
86 const int kSize2
= 5000;
87 const int kSize3
= 10000;
88 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
89 scoped_refptr
<net::IOBuffer
> buffer3(new net::IOBuffer(kSize3
));
90 memset(buffer3
->data(), 0, kSize3
);
91 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
92 base::strlcpy(buffer2
->data(), "The really big data goes here", kSize2
);
96 1, 1500, buffer2
.get(), kSize2
, net::CompletionCallback(), false));
97 memset(buffer2
->data(), 0, kSize2
);
100 1, 1511, buffer2
.get(), kSize2
, net::CompletionCallback()));
101 EXPECT_STREQ("big data goes here", buffer2
->data());
104 entry
->ReadData(1, 0, buffer2
.get(), kSize2
, net::CompletionCallback()));
105 EXPECT_EQ(0, memcmp(buffer2
->data(), buffer3
->data(), 1500));
108 1, 5000, buffer2
.get(), kSize2
, net::CompletionCallback()));
112 1, 6500, buffer2
.get(), kSize2
, net::CompletionCallback()));
115 entry
->ReadData(1, 0, buffer3
.get(), kSize3
, net::CompletionCallback()));
118 1, 0, buffer3
.get(), 8192, net::CompletionCallback(), false));
121 entry
->ReadData(1, 0, buffer3
.get(), kSize3
, net::CompletionCallback()));
122 EXPECT_EQ(8192, entry
->GetDataSize(1));
124 // We need to delete the memory buffer on this thread.
125 EXPECT_EQ(0, entry
->WriteData(
126 0, 0, NULL
, 0, net::CompletionCallback(), true));
127 EXPECT_EQ(0, entry
->WriteData(
128 1, 0, NULL
, 0, net::CompletionCallback(), true));
131 // We need to support synchronous IO even though it is not a supported operation
132 // from the point of view of the disk cache's public interface, because we use
133 // it internally, not just by a few tests, but as part of the implementation
134 // (see sparse_control.cc, for example).
// NOTE(review): garbled extraction -- statements are split across physical
// lines and some are elided (fused line numbers jump 138 -> 140 and
// 142 -> 149; the closing brace is also missing). Code kept byte-identical;
// comments only.
135 void DiskCacheEntryTest::InternalSyncIO() {
// Create the entry that the background-thread helper will exercise.
136 disk_cache::Entry
* entry
= NULL
;
137 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
138 ASSERT_TRUE(NULL
!= entry
);
140 // The bulk of the test runs from within the callback, on the cache thread.
141 RunTaskForTest(base::Bind(&DiskCacheEntryTest::InternalSyncIOBackground
,
142 base::Unretained(this),
// NOTE(review): the bound |entry| argument and the entry close/doom
// statements (original lines 143-148) appear elided here -- verify upstream.
// After the doomed entry is gone, the cache should be empty again.
149 EXPECT_EQ(0, cache_
->GetEntryCount());
152 TEST_F(DiskCacheEntryTest
, InternalSyncIO
) {
157 TEST_F(DiskCacheEntryTest
, MemoryOnlyInternalSyncIO
) {
163 void DiskCacheEntryTest::InternalAsyncIO() {
164 disk_cache::Entry
* entry
= NULL
;
165 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
166 ASSERT_TRUE(NULL
!= entry
);
168 // Avoid using internal buffers for the test. We have to write something to
169 // the entry and close it so that we flush the internal buffer to disk. After
170 // that, IO operations will be really hitting the disk. We don't care about
171 // the content, so just extending the entry is enough (all extensions zero-
173 EXPECT_EQ(0, WriteData(entry
, 0, 15 * 1024, NULL
, 0, false));
174 EXPECT_EQ(0, WriteData(entry
, 1, 15 * 1024, NULL
, 0, false));
176 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry
));
178 MessageLoopHelper helper
;
179 // Let's verify that each IO goes to the right callback object.
180 CallbackTest
callback1(&helper
, false);
181 CallbackTest
callback2(&helper
, false);
182 CallbackTest
callback3(&helper
, false);
183 CallbackTest
callback4(&helper
, false);
184 CallbackTest
callback5(&helper
, false);
185 CallbackTest
callback6(&helper
, false);
186 CallbackTest
callback7(&helper
, false);
187 CallbackTest
callback8(&helper
, false);
188 CallbackTest
callback9(&helper
, false);
189 CallbackTest
callback10(&helper
, false);
190 CallbackTest
callback11(&helper
, false);
191 CallbackTest
callback12(&helper
, false);
192 CallbackTest
callback13(&helper
, false);
194 const int kSize1
= 10;
195 const int kSize2
= 5000;
196 const int kSize3
= 10000;
197 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
198 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
199 scoped_refptr
<net::IOBuffer
> buffer3(new net::IOBuffer(kSize3
));
200 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
201 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
202 CacheTestFillBuffer(buffer3
->data(), kSize3
, false);
210 base::Bind(&CallbackTest::Run
, base::Unretained(&callback1
))));
211 base::strlcpy(buffer1
->data(), "the data", kSize1
);
213 int ret
= entry
->WriteData(
218 base::Bind(&CallbackTest::Run
, base::Unretained(&callback2
)),
220 EXPECT_TRUE(10 == ret
|| net::ERR_IO_PENDING
== ret
);
221 if (net::ERR_IO_PENDING
== ret
)
224 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
225 memset(buffer2
->data(), 0, kSize2
);
226 ret
= entry
->ReadData(
231 base::Bind(&CallbackTest::Run
, base::Unretained(&callback3
)));
232 EXPECT_TRUE(10 == ret
|| net::ERR_IO_PENDING
== ret
);
233 if (net::ERR_IO_PENDING
== ret
)
236 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
237 EXPECT_STREQ("the data", buffer2
->data());
239 base::strlcpy(buffer2
->data(), "The really big data goes here", kSize2
);
240 ret
= entry
->WriteData(
245 base::Bind(&CallbackTest::Run
, base::Unretained(&callback4
)),
247 EXPECT_TRUE(5000 == ret
|| net::ERR_IO_PENDING
== ret
);
248 if (net::ERR_IO_PENDING
== ret
)
251 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
252 memset(buffer3
->data(), 0, kSize3
);
253 ret
= entry
->ReadData(
258 base::Bind(&CallbackTest::Run
, base::Unretained(&callback5
)));
259 EXPECT_TRUE(4989 == ret
|| net::ERR_IO_PENDING
== ret
);
260 if (net::ERR_IO_PENDING
== ret
)
263 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
264 EXPECT_STREQ("big data goes here", buffer3
->data());
265 ret
= entry
->ReadData(
270 base::Bind(&CallbackTest::Run
, base::Unretained(&callback6
)));
271 EXPECT_TRUE(5000 == ret
|| net::ERR_IO_PENDING
== ret
);
272 if (net::ERR_IO_PENDING
== ret
)
275 memset(buffer3
->data(), 0, kSize3
);
277 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
278 EXPECT_EQ(0, memcmp(buffer2
->data(), buffer3
->data(), 1500));
279 ret
= entry
->ReadData(
284 base::Bind(&CallbackTest::Run
, base::Unretained(&callback7
)));
285 EXPECT_TRUE(1500 == ret
|| net::ERR_IO_PENDING
== ret
);
286 if (net::ERR_IO_PENDING
== ret
)
289 ret
= entry
->ReadData(
294 base::Bind(&CallbackTest::Run
, base::Unretained(&callback9
)));
295 EXPECT_TRUE(6500 == ret
|| net::ERR_IO_PENDING
== ret
);
296 if (net::ERR_IO_PENDING
== ret
)
299 ret
= entry
->WriteData(
304 base::Bind(&CallbackTest::Run
, base::Unretained(&callback10
)),
306 EXPECT_TRUE(8192 == ret
|| net::ERR_IO_PENDING
== ret
);
307 if (net::ERR_IO_PENDING
== ret
)
310 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
311 ret
= entry
->ReadData(
316 base::Bind(&CallbackTest::Run
, base::Unretained(&callback11
)));
317 EXPECT_TRUE(8192 == ret
|| net::ERR_IO_PENDING
== ret
);
318 if (net::ERR_IO_PENDING
== ret
)
321 EXPECT_EQ(8192, entry
->GetDataSize(1));
323 ret
= entry
->ReadData(
328 base::Bind(&CallbackTest::Run
, base::Unretained(&callback12
)));
329 EXPECT_TRUE(10 == ret
|| net::ERR_IO_PENDING
== ret
);
330 if (net::ERR_IO_PENDING
== ret
)
333 ret
= entry
->ReadData(
338 base::Bind(&CallbackTest::Run
, base::Unretained(&callback13
)));
339 EXPECT_TRUE(5000 == ret
|| net::ERR_IO_PENDING
== ret
);
340 if (net::ERR_IO_PENDING
== ret
)
343 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
345 EXPECT_FALSE(helper
.callback_reused_error());
350 EXPECT_EQ(0, cache_
->GetEntryCount());
353 TEST_F(DiskCacheEntryTest
, InternalAsyncIO
) {
358 TEST_F(DiskCacheEntryTest
, MemoryOnlyInternalAsyncIO
) {
364 // This part of the test runs on the background thread.
365 void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry
* entry
) {
366 const int kSize1
= 17000;
367 const int kSize2
= 25000;
368 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
369 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
370 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
371 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
372 base::strlcpy(buffer1
->data(), "the data", kSize1
);
375 0, 0, buffer1
.get(), kSize1
, net::CompletionCallback(), false));
376 memset(buffer1
->data(), 0, kSize1
);
379 entry
->ReadData(0, 0, buffer1
.get(), kSize1
, net::CompletionCallback()));
380 EXPECT_STREQ("the data", buffer1
->data());
382 base::strlcpy(buffer2
->data(), "The really big data goes here", kSize2
);
386 1, 10000, buffer2
.get(), kSize2
, net::CompletionCallback(), false));
387 memset(buffer2
->data(), 0, kSize2
);
390 1, 10011, buffer2
.get(), kSize2
, net::CompletionCallback()));
391 EXPECT_STREQ("big data goes here", buffer2
->data());
394 entry
->ReadData(1, 0, buffer2
.get(), kSize2
, net::CompletionCallback()));
395 EXPECT_EQ(0, memcmp(buffer2
->data(), buffer2
->data(), 10000));
398 1, 30000, buffer2
.get(), kSize2
, net::CompletionCallback()));
402 1, 35000, buffer2
.get(), kSize2
, net::CompletionCallback()));
405 entry
->ReadData(1, 0, buffer1
.get(), kSize1
, net::CompletionCallback()));
409 1, 20000, buffer1
.get(), kSize1
, net::CompletionCallback(), false));
410 EXPECT_EQ(37000, entry
->GetDataSize(1));
412 // We need to delete the memory buffer on this thread.
413 EXPECT_EQ(0, entry
->WriteData(
414 0, 0, NULL
, 0, net::CompletionCallback(), true));
415 EXPECT_EQ(0, entry
->WriteData(
416 1, 0, NULL
, 0, net::CompletionCallback(), true));
// Driver for the external-file synchronous IO test; the real work happens
// in ExternalSyncIOBackground() on the cache thread.
// NOTE(review): garbled extraction -- fused line numbers jump 421 -> 423 and
// 425 -> 431, so the bound |entry| argument and the cleanup statements are
// elided; the closing brace is missing too. Code kept byte-identical.
419 void DiskCacheEntryTest::ExternalSyncIO() {
420 disk_cache::Entry
* entry
;
421 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
423 // The bulk of the test runs from within the callback, on the cache thread.
424 RunTaskForTest(base::Bind(&DiskCacheEntryTest::ExternalSyncIOBackground
,
425 base::Unretained(this),
// Cache must end up empty once the entry has been doomed (elided above).
431 EXPECT_EQ(0, cache_
->GetEntryCount());
434 TEST_F(DiskCacheEntryTest
, ExternalSyncIO
) {
439 TEST_F(DiskCacheEntryTest
, ExternalSyncIONoBuffer
) {
441 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
445 TEST_F(DiskCacheEntryTest
, MemoryOnlyExternalSyncIO
) {
451 void DiskCacheEntryTest::ExternalAsyncIO() {
452 disk_cache::Entry
* entry
;
453 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
457 MessageLoopHelper helper
;
458 // Let's verify that each IO goes to the right callback object.
459 CallbackTest
callback1(&helper
, false);
460 CallbackTest
callback2(&helper
, false);
461 CallbackTest
callback3(&helper
, false);
462 CallbackTest
callback4(&helper
, false);
463 CallbackTest
callback5(&helper
, false);
464 CallbackTest
callback6(&helper
, false);
465 CallbackTest
callback7(&helper
, false);
466 CallbackTest
callback8(&helper
, false);
467 CallbackTest
callback9(&helper
, false);
469 const int kSize1
= 17000;
470 const int kSize2
= 25000;
471 const int kSize3
= 25000;
472 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
473 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
474 scoped_refptr
<net::IOBuffer
> buffer3(new net::IOBuffer(kSize3
));
475 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
476 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
477 CacheTestFillBuffer(buffer3
->data(), kSize3
, false);
478 base::strlcpy(buffer1
->data(), "the data", kSize1
);
479 int ret
= entry
->WriteData(
484 base::Bind(&CallbackTest::Run
, base::Unretained(&callback1
)),
486 EXPECT_TRUE(17000 == ret
|| net::ERR_IO_PENDING
== ret
);
487 if (net::ERR_IO_PENDING
== ret
)
490 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
492 memset(buffer2
->data(), 0, kSize1
);
493 ret
= entry
->ReadData(
498 base::Bind(&CallbackTest::Run
, base::Unretained(&callback2
)));
499 EXPECT_TRUE(17000 == ret
|| net::ERR_IO_PENDING
== ret
);
500 if (net::ERR_IO_PENDING
== ret
)
503 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
504 EXPECT_STREQ("the data", buffer2
->data());
506 base::strlcpy(buffer2
->data(), "The really big data goes here", kSize2
);
507 ret
= entry
->WriteData(
512 base::Bind(&CallbackTest::Run
, base::Unretained(&callback3
)),
514 EXPECT_TRUE(25000 == ret
|| net::ERR_IO_PENDING
== ret
);
515 if (net::ERR_IO_PENDING
== ret
)
518 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
520 memset(buffer3
->data(), 0, kSize3
);
521 ret
= entry
->ReadData(
526 base::Bind(&CallbackTest::Run
, base::Unretained(&callback4
)));
527 EXPECT_TRUE(24989 == ret
|| net::ERR_IO_PENDING
== ret
);
528 if (net::ERR_IO_PENDING
== ret
)
531 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
532 EXPECT_STREQ("big data goes here", buffer3
->data());
533 ret
= entry
->ReadData(
538 base::Bind(&CallbackTest::Run
, base::Unretained(&callback5
)));
539 EXPECT_TRUE(25000 == ret
|| net::ERR_IO_PENDING
== ret
);
540 if (net::ERR_IO_PENDING
== ret
)
543 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
544 memset(buffer3
->data(), 0, kSize3
);
545 EXPECT_EQ(0, memcmp(buffer2
->data(), buffer3
->data(), 10000));
546 ret
= entry
->ReadData(
551 base::Bind(&CallbackTest::Run
, base::Unretained(&callback6
)));
552 EXPECT_TRUE(5000 == ret
|| net::ERR_IO_PENDING
== ret
);
553 if (net::ERR_IO_PENDING
== ret
)
562 base::Bind(&CallbackTest::Run
, base::Unretained(&callback7
))));
563 ret
= entry
->ReadData(
568 base::Bind(&CallbackTest::Run
, base::Unretained(&callback8
)));
569 EXPECT_TRUE(17000 == ret
|| net::ERR_IO_PENDING
== ret
);
570 if (net::ERR_IO_PENDING
== ret
)
572 ret
= entry
->WriteData(
577 base::Bind(&CallbackTest::Run
, base::Unretained(&callback9
)),
579 EXPECT_TRUE(17000 == ret
|| net::ERR_IO_PENDING
== ret
);
580 if (net::ERR_IO_PENDING
== ret
)
583 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
584 EXPECT_EQ(37000, entry
->GetDataSize(1));
586 EXPECT_FALSE(helper
.callback_reused_error());
591 EXPECT_EQ(0, cache_
->GetEntryCount());
594 TEST_F(DiskCacheEntryTest
, ExternalAsyncIO
) {
599 TEST_F(DiskCacheEntryTest
, ExternalAsyncIONoBuffer
) {
601 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
605 TEST_F(DiskCacheEntryTest
, MemoryOnlyExternalAsyncIO
) {
611 // Tests that IOBuffers are not referenced after IO completes.
// NOTE(review): garbled extraction -- fused line numbers jump (615 -> 617,
// 619 -> 621, 621 -> 623), eliding statements; in particular the
// declaration of |rv| (original line 622, the WriteData return value) is
// missing, yet |rv| is used below. Code kept byte-identical; comments only.
612 void DiskCacheEntryTest::ReleaseBuffer() {
613 disk_cache::Entry
* entry
= NULL
;
614 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
615 ASSERT_TRUE(NULL
!= entry
);
617 const int kBufferSize
= 1024;
618 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kBufferSize
));
619 CacheTestFillBuffer(buffer
->data(), kBufferSize
, false);
// ReleaseBufferCompletionCallback fails the test if |buffer| still has
// extra references when the callback runs.
621 net::ReleaseBufferCompletionCallback
cb(buffer
.get());
623 entry
->WriteData(0, 0, buffer
.get(), kBufferSize
, cb
.callback(), false);
// NOTE(review): |rv| is the (elided) WriteData return -- confirm upstream.
624 EXPECT_EQ(kBufferSize
, cb
.GetResult(rv
));
628 TEST_F(DiskCacheEntryTest
, ReleaseBuffer
) {
630 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
634 TEST_F(DiskCacheEntryTest
, MemoryOnlyReleaseBuffer
) {
640 void DiskCacheEntryTest::StreamAccess() {
641 disk_cache::Entry
* entry
= NULL
;
642 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry
));
643 ASSERT_TRUE(NULL
!= entry
);
645 const int kBufferSize
= 1024;
646 const int kNumStreams
= 3;
647 scoped_refptr
<net::IOBuffer
> reference_buffers
[kNumStreams
];
648 for (int i
= 0; i
< kNumStreams
; i
++) {
649 reference_buffers
[i
] = new net::IOBuffer(kBufferSize
);
650 CacheTestFillBuffer(reference_buffers
[i
]->data(), kBufferSize
, false);
652 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kBufferSize
));
653 for (int i
= 0; i
< kNumStreams
; i
++) {
656 WriteData(entry
, i
, 0, reference_buffers
[i
].get(), kBufferSize
, false));
657 memset(buffer1
->data(), 0, kBufferSize
);
658 EXPECT_EQ(kBufferSize
, ReadData(entry
, i
, 0, buffer1
.get(), kBufferSize
));
660 0, memcmp(reference_buffers
[i
]->data(), buffer1
->data(), kBufferSize
));
662 EXPECT_EQ(net::ERR_INVALID_ARGUMENT
,
663 ReadData(entry
, kNumStreams
, 0, buffer1
.get(), kBufferSize
));
666 // Open the entry and read it in chunks, including a read past the end.
667 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry
));
668 ASSERT_TRUE(NULL
!= entry
);
669 const int kReadBufferSize
= 600;
670 const int kFinalReadSize
= kBufferSize
- kReadBufferSize
;
671 COMPILE_ASSERT(kFinalReadSize
< kReadBufferSize
, should_be_exactly_two_reads
);
672 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kReadBufferSize
));
673 for (int i
= 0; i
< kNumStreams
; i
++) {
674 memset(buffer2
->data(), 0, kReadBufferSize
);
675 EXPECT_EQ(kReadBufferSize
,
676 ReadData(entry
, i
, 0, buffer2
.get(), kReadBufferSize
));
679 memcmp(reference_buffers
[i
]->data(), buffer2
->data(), kReadBufferSize
));
681 memset(buffer2
->data(), 0, kReadBufferSize
);
684 ReadData(entry
, i
, kReadBufferSize
, buffer2
.get(), kReadBufferSize
));
686 memcmp(reference_buffers
[i
]->data() + kReadBufferSize
,
694 TEST_F(DiskCacheEntryTest
, StreamAccess
) {
699 TEST_F(DiskCacheEntryTest
, MemoryOnlyStreamAccess
) {
// Verifies Entry::GetKey() round-trips keys of several sizes (short,
// 1000-byte, ~3000-byte, ~20000-byte and 16KB keys).
// NOTE(review): garbled extraction -- fused line numbers jump throughout
// (709 -> 712, 717 -> 720, ...), eliding the |key| assignments from
// |key_buffer| and the entry Close() calls between scenarios; the closing
// brace is missing. Code kept byte-identical; comments only.
705 void DiskCacheEntryTest::GetKey() {
706 std::string
key("the first key");
707 disk_cache::Entry
* entry
;
708 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
// Short key: GetKey() must return exactly what CreateEntry was given.
709 EXPECT_EQ(key
, entry
->GetKey()) << "short key";
// |seed| presumably randomizes the generated keys (usage elided here).
712 int seed
= static_cast<int>(Time::Now().ToInternalValue());
714 char key_buffer
[20000];
// 1000-byte key: fill 3000 random chars, then truncate at index 1000.
716 CacheTestFillBuffer(key_buffer
, 3000, true);
717 key_buffer
[1000] = '\0';
720 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
721 EXPECT_TRUE(key
== entry
->GetKey()) << "1000 bytes key";
// Medium key: un-truncate position 1000 and terminate at 3000 instead.
724 key_buffer
[1000] = 'p';
725 key_buffer
[3000] = '\0';
727 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
728 EXPECT_TRUE(key
== entry
->GetKey()) << "medium size key";
// Long key: refill the whole buffer, terminate at the last slot (19999).
731 CacheTestFillBuffer(key_buffer
, sizeof(key_buffer
), true);
732 key_buffer
[19999] = '\0';
735 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
736 EXPECT_TRUE(key
== entry
->GetKey()) << "long key";
// Exactly 16KB (0x4000) key.
739 CacheTestFillBuffer(key_buffer
, 0x4000, true);
740 key_buffer
[0x4000] = '\0';
743 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
744 EXPECT_TRUE(key
== entry
->GetKey()) << "16KB key";
748 TEST_F(DiskCacheEntryTest
, GetKey
) {
753 TEST_F(DiskCacheEntryTest
, MemoryOnlyGetKey
) {
// Verifies GetLastModified()/GetLastUsed() bookkeeping across create, write
// and read, with cache-type-specific expectations (APP_CACHE and
// SHADER_CACHE do not update times on normal use).
// NOTE(review): garbled extraction -- fused line numbers jump (766 -> 769,
// 773 -> 775, 790 -> 792, ...), eliding the "} else {" lines of both
// conditionals, the AddDelay()-style waits between timestamps, and the
// closing braces. Code kept byte-identical; comments only.
759 void DiskCacheEntryTest::GetTimes() {
760 std::string
key("the first key");
761 disk_cache::Entry
* entry
;
// t1: taken before creation; a fresh entry's times must be >= t1 and equal.
763 Time t1
= Time::Now();
764 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
765 EXPECT_TRUE(entry
->GetLastModified() >= t1
);
766 EXPECT_TRUE(entry
->GetLastModified() == entry
->GetLastUsed());
// t2: taken before a write (a delay between t1 and t2 appears elided).
769 Time t2
= Time::Now();
770 EXPECT_TRUE(t2
> t1
);
771 EXPECT_EQ(0, WriteData(entry
, 0, 200, NULL
, 0, false));
// APP_CACHE does not touch the modification time on write...
772 if (type_
== net::APP_CACHE
) {
773 EXPECT_TRUE(entry
->GetLastModified() < t2
);
// ...other cache types do (the "} else {" line is elided here).
775 EXPECT_TRUE(entry
->GetLastModified() >= t2
);
777 EXPECT_TRUE(entry
->GetLastModified() == entry
->GetLastUsed());
// t3: taken before a read.
780 Time t3
= Time::Now();
781 EXPECT_TRUE(t3
> t2
);
782 const int kSize
= 200;
783 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
784 EXPECT_EQ(kSize
, ReadData(entry
, 0, 0, buffer
.get(), kSize
));
// APP_CACHE: reads update neither time; SHADER_CACHE: neither moves past
// t3; default: reads refresh last-used but not last-modified.
785 if (type_
== net::APP_CACHE
) {
786 EXPECT_TRUE(entry
->GetLastUsed() < t2
);
787 EXPECT_TRUE(entry
->GetLastModified() < t2
);
788 } else if (type_
== net::SHADER_CACHE
) {
789 EXPECT_TRUE(entry
->GetLastUsed() < t3
);
790 EXPECT_TRUE(entry
->GetLastModified() < t3
);
// (the "} else {" line is elided here)
792 EXPECT_TRUE(entry
->GetLastUsed() >= t3
);
793 EXPECT_TRUE(entry
->GetLastModified() < t3
);
798 TEST_F(DiskCacheEntryTest
, GetTimes
) {
803 TEST_F(DiskCacheEntryTest
, MemoryOnlyGetTimes
) {
809 TEST_F(DiskCacheEntryTest
, AppCacheGetTimes
) {
810 SetCacheType(net::APP_CACHE
);
815 TEST_F(DiskCacheEntryTest
, ShaderCacheGetTimes
) {
816 SetCacheType(net::SHADER_CACHE
);
821 void DiskCacheEntryTest::GrowData() {
822 std::string
key1("the first key");
823 disk_cache::Entry
* entry
;
824 ASSERT_EQ(net::OK
, CreateEntry(key1
, &entry
));
826 const int kSize
= 20000;
827 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
828 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
829 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
830 memset(buffer2
->data(), 0, kSize
);
832 base::strlcpy(buffer1
->data(), "the data", kSize
);
833 EXPECT_EQ(10, WriteData(entry
, 0, 0, buffer1
.get(), 10, false));
834 EXPECT_EQ(10, ReadData(entry
, 0, 0, buffer2
.get(), 10));
835 EXPECT_STREQ("the data", buffer2
->data());
836 EXPECT_EQ(10, entry
->GetDataSize(0));
838 EXPECT_EQ(2000, WriteData(entry
, 0, 0, buffer1
.get(), 2000, false));
839 EXPECT_EQ(2000, entry
->GetDataSize(0));
840 EXPECT_EQ(2000, ReadData(entry
, 0, 0, buffer2
.get(), 2000));
841 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 2000));
843 EXPECT_EQ(20000, WriteData(entry
, 0, 0, buffer1
.get(), kSize
, false));
844 EXPECT_EQ(20000, entry
->GetDataSize(0));
845 EXPECT_EQ(20000, ReadData(entry
, 0, 0, buffer2
.get(), kSize
));
846 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), kSize
));
849 memset(buffer2
->data(), 0, kSize
);
850 std::string
key2("Second key");
851 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry
));
852 EXPECT_EQ(10, WriteData(entry
, 0, 0, buffer1
.get(), 10, false));
853 EXPECT_EQ(10, entry
->GetDataSize(0));
856 // Go from an internal address to a bigger block size.
857 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
858 EXPECT_EQ(2000, WriteData(entry
, 0, 0, buffer1
.get(), 2000, false));
859 EXPECT_EQ(2000, entry
->GetDataSize(0));
860 EXPECT_EQ(2000, ReadData(entry
, 0, 0, buffer2
.get(), 2000));
861 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 2000));
863 memset(buffer2
->data(), 0, kSize
);
865 // Go from an internal address to an external one.
866 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
867 EXPECT_EQ(20000, WriteData(entry
, 0, 0, buffer1
.get(), kSize
, false));
868 EXPECT_EQ(20000, entry
->GetDataSize(0));
869 EXPECT_EQ(20000, ReadData(entry
, 0, 0, buffer2
.get(), kSize
));
870 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), kSize
));
873 // Double check the size from disk.
874 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
875 EXPECT_EQ(20000, entry
->GetDataSize(0));
877 // Now extend the entry without actual data.
878 EXPECT_EQ(0, WriteData(entry
, 0, 45500, buffer1
.get(), 0, false));
881 // And check again from disk.
882 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
883 EXPECT_EQ(45500, entry
->GetDataSize(0));
887 TEST_F(DiskCacheEntryTest
, GrowData
) {
892 TEST_F(DiskCacheEntryTest
, GrowDataNoBuffer
) {
894 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
898 TEST_F(DiskCacheEntryTest
, MemoryOnlyGrowData
) {
904 void DiskCacheEntryTest::TruncateData() {
905 std::string
key("the first key");
906 disk_cache::Entry
* entry
;
907 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
909 const int kSize1
= 20000;
910 const int kSize2
= 20000;
911 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
912 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
914 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
915 memset(buffer2
->data(), 0, kSize2
);
917 // Simple truncation:
918 EXPECT_EQ(200, WriteData(entry
, 0, 0, buffer1
.get(), 200, false));
919 EXPECT_EQ(200, entry
->GetDataSize(0));
920 EXPECT_EQ(100, WriteData(entry
, 0, 0, buffer1
.get(), 100, false));
921 EXPECT_EQ(200, entry
->GetDataSize(0));
922 EXPECT_EQ(100, WriteData(entry
, 0, 0, buffer1
.get(), 100, true));
923 EXPECT_EQ(100, entry
->GetDataSize(0));
924 EXPECT_EQ(0, WriteData(entry
, 0, 50, buffer1
.get(), 0, true));
925 EXPECT_EQ(50, entry
->GetDataSize(0));
926 EXPECT_EQ(0, WriteData(entry
, 0, 0, buffer1
.get(), 0, true));
927 EXPECT_EQ(0, entry
->GetDataSize(0));
929 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
931 // Go to an external file.
932 EXPECT_EQ(20000, WriteData(entry
, 0, 0, buffer1
.get(), 20000, true));
933 EXPECT_EQ(20000, entry
->GetDataSize(0));
934 EXPECT_EQ(20000, ReadData(entry
, 0, 0, buffer2
.get(), 20000));
935 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 20000));
936 memset(buffer2
->data(), 0, kSize2
);
938 // External file truncation
939 EXPECT_EQ(18000, WriteData(entry
, 0, 0, buffer1
.get(), 18000, false));
940 EXPECT_EQ(20000, entry
->GetDataSize(0));
941 EXPECT_EQ(18000, WriteData(entry
, 0, 0, buffer1
.get(), 18000, true));
942 EXPECT_EQ(18000, entry
->GetDataSize(0));
943 EXPECT_EQ(0, WriteData(entry
, 0, 17500, buffer1
.get(), 0, true));
944 EXPECT_EQ(17500, entry
->GetDataSize(0));
946 // And back to an internal block.
947 EXPECT_EQ(600, WriteData(entry
, 0, 1000, buffer1
.get(), 600, true));
948 EXPECT_EQ(1600, entry
->GetDataSize(0));
949 EXPECT_EQ(600, ReadData(entry
, 0, 1000, buffer2
.get(), 600));
950 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 600));
951 EXPECT_EQ(1000, ReadData(entry
, 0, 0, buffer2
.get(), 1000));
952 EXPECT_TRUE(!memcmp(buffer1
->data(), buffer2
->data(), 1000))
953 << "Preserves previous data";
955 // Go from external file to zero length.
956 EXPECT_EQ(20000, WriteData(entry
, 0, 0, buffer1
.get(), 20000, true));
957 EXPECT_EQ(20000, entry
->GetDataSize(0));
958 EXPECT_EQ(0, WriteData(entry
, 0, 0, buffer1
.get(), 0, true));
959 EXPECT_EQ(0, entry
->GetDataSize(0));
964 TEST_F(DiskCacheEntryTest
, TruncateData
) {
969 TEST_F(DiskCacheEntryTest
, TruncateDataNoBuffer
) {
971 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
975 TEST_F(DiskCacheEntryTest
, MemoryOnlyTruncateData
) {
981 void DiskCacheEntryTest::ZeroLengthIO() {
982 std::string
key("the first key");
983 disk_cache::Entry
* entry
;
984 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
986 EXPECT_EQ(0, ReadData(entry
, 0, 0, NULL
, 0));
987 EXPECT_EQ(0, WriteData(entry
, 0, 0, NULL
, 0, false));
989 // This write should extend the entry.
990 EXPECT_EQ(0, WriteData(entry
, 0, 1000, NULL
, 0, false));
991 EXPECT_EQ(0, ReadData(entry
, 0, 500, NULL
, 0));
992 EXPECT_EQ(0, ReadData(entry
, 0, 2000, NULL
, 0));
993 EXPECT_EQ(1000, entry
->GetDataSize(0));
995 EXPECT_EQ(0, WriteData(entry
, 0, 100000, NULL
, 0, true));
996 EXPECT_EQ(0, ReadData(entry
, 0, 50000, NULL
, 0));
997 EXPECT_EQ(100000, entry
->GetDataSize(0));
999 // Let's verify the actual content.
1000 const int kSize
= 20;
1001 const char zeros
[kSize
] = {};
1002 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1004 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1005 EXPECT_EQ(kSize
, ReadData(entry
, 0, 500, buffer
.get(), kSize
));
1006 EXPECT_TRUE(!memcmp(buffer
->data(), zeros
, kSize
));
1008 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1009 EXPECT_EQ(kSize
, ReadData(entry
, 0, 5000, buffer
.get(), kSize
));
1010 EXPECT_TRUE(!memcmp(buffer
->data(), zeros
, kSize
));
1012 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1013 EXPECT_EQ(kSize
, ReadData(entry
, 0, 50000, buffer
.get(), kSize
));
1014 EXPECT_TRUE(!memcmp(buffer
->data(), zeros
, kSize
));
1019 TEST_F(DiskCacheEntryTest
, ZeroLengthIO
) {
1024 TEST_F(DiskCacheEntryTest
, ZeroLengthIONoBuffer
) {
1026 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
1030 TEST_F(DiskCacheEntryTest
, MemoryOnlyZeroLengthIO
) {
1031 SetMemoryOnlyMode();
1036 // Tests that we handle the content correctly when buffering, a feature of the
1037 // standard cache that permits fast responses to certain reads.
1038 void DiskCacheEntryTest::Buffering() {
1039 std::string
key("the first key");
1040 disk_cache::Entry
* entry
;
1041 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1043 const int kSize
= 200;
1044 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
1045 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
1046 CacheTestFillBuffer(buffer1
->data(), kSize
, true);
1047 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1049 EXPECT_EQ(kSize
, WriteData(entry
, 1, 0, buffer1
.get(), kSize
, false));
1052 // Write a little more and read what we wrote before.
1053 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1054 EXPECT_EQ(kSize
, WriteData(entry
, 1, 5000, buffer1
.get(), kSize
, false));
1055 EXPECT_EQ(kSize
, ReadData(entry
, 1, 0, buffer2
.get(), kSize
));
1056 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1058 // Now go to an external file.
1059 EXPECT_EQ(kSize
, WriteData(entry
, 1, 18000, buffer1
.get(), kSize
, false));
1062 // Write something else and verify old data.
1063 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1064 EXPECT_EQ(kSize
, WriteData(entry
, 1, 10000, buffer1
.get(), kSize
, false));
1065 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1066 EXPECT_EQ(kSize
, ReadData(entry
, 1, 5000, buffer2
.get(), kSize
));
1067 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1068 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1069 EXPECT_EQ(kSize
, ReadData(entry
, 1, 0, buffer2
.get(), kSize
));
1070 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1071 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1072 EXPECT_EQ(kSize
, ReadData(entry
, 1, 18000, buffer2
.get(), kSize
));
1073 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1075 // Extend the file some more.
1076 EXPECT_EQ(kSize
, WriteData(entry
, 1, 23000, buffer1
.get(), kSize
, false));
1079 // And now make sure that we can deal with data in both places (ram/disk).
1080 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1081 EXPECT_EQ(kSize
, WriteData(entry
, 1, 17000, buffer1
.get(), kSize
, false));
1083 // We should not overwrite the data at 18000 with this.
1084 EXPECT_EQ(kSize
, WriteData(entry
, 1, 19000, buffer1
.get(), kSize
, false));
1085 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1086 EXPECT_EQ(kSize
, ReadData(entry
, 1, 18000, buffer2
.get(), kSize
));
1087 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1088 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1089 EXPECT_EQ(kSize
, ReadData(entry
, 1, 17000, buffer2
.get(), kSize
));
1090 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1092 EXPECT_EQ(kSize
, WriteData(entry
, 1, 22900, buffer1
.get(), kSize
, false));
1093 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1094 EXPECT_EQ(100, ReadData(entry
, 1, 23000, buffer2
.get(), kSize
));
1095 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data() + 100, 100));
1097 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1098 EXPECT_EQ(100, ReadData(entry
, 1, 23100, buffer2
.get(), kSize
));
1099 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data() + 100, 100));
1101 // Extend the file again and read before without closing the entry.
1102 EXPECT_EQ(kSize
, WriteData(entry
, 1, 25000, buffer1
.get(), kSize
, false));
1103 EXPECT_EQ(kSize
, WriteData(entry
, 1, 45000, buffer1
.get(), kSize
, false));
1104 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1105 EXPECT_EQ(kSize
, ReadData(entry
, 1, 25000, buffer2
.get(), kSize
));
1106 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
1107 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1108 EXPECT_EQ(kSize
, ReadData(entry
, 1, 45000, buffer2
.get(), kSize
));
1109 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data(), kSize
));
// NOTE(review): extraction artifact — the leading "NNNN " token on each
// statement line below is the original file's line number fused into the
// text. The bodies of both TEST_F stubs (original lines 1115-1118, 1120,
// 1122-1123 — presumably InitCache(), the Buffering() driver and the closing
// braces) were lost from this chunk; confirm against the upstream file.
1114 TEST_F(DiskCacheEntryTest
, Buffering
) {
1119 TEST_F(DiskCacheEntryTest
, BufferingNoBuffer
) {
// kNoBuffering presumably disables the blockfile backend's internal user
// buffering so the same test exercises the direct-IO path — confirm in the
// backend implementation.
1121 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
1125 // Checks that entries are zero length when created.
// NOTE(review): extraction artifact — gaps in the fused original line
// numbering (1130, 1134-1136) show that blank lines, the function's closing
// brace and presumably a trailing entry->Close() were lost from this chunk;
// confirm against the upstream file before compiling.
1126 void DiskCacheEntryTest::SizeAtCreate() {
// Create a brand-new entry that has had no writes performed on it.
1127 const char key
[] = "the first key";
1128 disk_cache::Entry
* entry
;
1129 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
// Every data stream of a freshly created entry must report size 0.
1131 const int kNumStreams
= 3;
1132 for (int i
= 0; i
< kNumStreams
; ++i
)
1133 EXPECT_EQ(0, entry
->GetDataSize(i
));
1137 TEST_F(DiskCacheEntryTest
, SizeAtCreate
) {
1142 TEST_F(DiskCacheEntryTest
, MemoryOnlySizeAtCreate
) {
1143 SetMemoryOnlyMode();
1148 // Some extra tests to make sure that buffering works properly when changing
1150 void DiskCacheEntryTest::SizeChanges() {
1151 std::string
key("the first key");
1152 disk_cache::Entry
* entry
;
1153 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1155 const int kSize
= 200;
1156 const char zeros
[kSize
] = {};
1157 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
1158 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
1159 CacheTestFillBuffer(buffer1
->data(), kSize
, true);
1160 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1162 EXPECT_EQ(kSize
, WriteData(entry
, 1, 0, buffer1
.get(), kSize
, true));
1163 EXPECT_EQ(kSize
, WriteData(entry
, 1, 17000, buffer1
.get(), kSize
, true));
1164 EXPECT_EQ(kSize
, WriteData(entry
, 1, 23000, buffer1
.get(), kSize
, true));
1167 // Extend the file and read between the old size and the new write.
1168 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1169 EXPECT_EQ(23000 + kSize
, entry
->GetDataSize(1));
1170 EXPECT_EQ(kSize
, WriteData(entry
, 1, 25000, buffer1
.get(), kSize
, true));
1171 EXPECT_EQ(25000 + kSize
, entry
->GetDataSize(1));
1172 EXPECT_EQ(kSize
, ReadData(entry
, 1, 24000, buffer2
.get(), kSize
));
1173 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, kSize
));
1175 // Read at the end of the old file size.
1177 ReadData(entry
, 1, 23000 + kSize
- 35, buffer2
.get(), kSize
));
1178 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data() + kSize
- 35, 35));
1180 // Read slightly before the last write.
1181 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1182 EXPECT_EQ(kSize
, ReadData(entry
, 1, 24900, buffer2
.get(), kSize
));
1183 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, 100));
1184 EXPECT_TRUE(!memcmp(buffer2
->data() + 100, buffer1
->data(), kSize
- 100));
1186 // Extend the entry a little more.
1187 EXPECT_EQ(kSize
, WriteData(entry
, 1, 26000, buffer1
.get(), kSize
, true));
1188 EXPECT_EQ(26000 + kSize
, entry
->GetDataSize(1));
1189 CacheTestFillBuffer(buffer2
->data(), kSize
, true);
1190 EXPECT_EQ(kSize
, ReadData(entry
, 1, 25900, buffer2
.get(), kSize
));
1191 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, 100));
1192 EXPECT_TRUE(!memcmp(buffer2
->data() + 100, buffer1
->data(), kSize
- 100));
1194 // And now reduce the size.
1195 EXPECT_EQ(kSize
, WriteData(entry
, 1, 25000, buffer1
.get(), kSize
, true));
1196 EXPECT_EQ(25000 + kSize
, entry
->GetDataSize(1));
1197 EXPECT_EQ(28, ReadData(entry
, 1, 25000 + kSize
- 28, buffer2
.get(), kSize
));
1198 EXPECT_TRUE(!memcmp(buffer2
->data(), buffer1
->data() + kSize
- 28, 28));
1200 // Reduce the size with a buffer that is not extending the size.
1201 EXPECT_EQ(kSize
, WriteData(entry
, 1, 24000, buffer1
.get(), kSize
, false));
1202 EXPECT_EQ(25000 + kSize
, entry
->GetDataSize(1));
1203 EXPECT_EQ(kSize
, WriteData(entry
, 1, 24500, buffer1
.get(), kSize
, true));
1204 EXPECT_EQ(24500 + kSize
, entry
->GetDataSize(1));
1205 EXPECT_EQ(kSize
, ReadData(entry
, 1, 23900, buffer2
.get(), kSize
));
1206 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, 100));
1207 EXPECT_TRUE(!memcmp(buffer2
->data() + 100, buffer1
->data(), kSize
- 100));
1209 // And now reduce the size below the old size.
1210 EXPECT_EQ(kSize
, WriteData(entry
, 1, 19000, buffer1
.get(), kSize
, true));
1211 EXPECT_EQ(19000 + kSize
, entry
->GetDataSize(1));
1212 EXPECT_EQ(kSize
, ReadData(entry
, 1, 18900, buffer2
.get(), kSize
));
1213 EXPECT_TRUE(!memcmp(buffer2
->data(), zeros
, 100));
1214 EXPECT_TRUE(!memcmp(buffer2
->data() + 100, buffer1
->data(), kSize
- 100));
1216 // Verify that the actual file is truncated.
1218 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1219 EXPECT_EQ(19000 + kSize
, entry
->GetDataSize(1));
1221 // Extend the newly opened file with a zero length write, expect zero fill.
1222 EXPECT_EQ(0, WriteData(entry
, 1, 20000 + kSize
, buffer1
.get(), 0, false));
1223 EXPECT_EQ(kSize
, ReadData(entry
, 1, 19000 + kSize
, buffer1
.get(), kSize
));
1224 EXPECT_EQ(0, memcmp(buffer1
->data(), zeros
, kSize
));
1229 TEST_F(DiskCacheEntryTest
, SizeChanges
) {
1234 TEST_F(DiskCacheEntryTest
, SizeChangesNoBuffer
) {
1236 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
1240 // Write more than the total cache capacity but to a single entry. |size| is the
1241 // amount of bytes to write each time.
1242 void DiskCacheEntryTest::ReuseEntry(int size
) {
1243 std::string
key1("the first key");
1244 disk_cache::Entry
* entry
;
1245 ASSERT_EQ(net::OK
, CreateEntry(key1
, &entry
));
1248 std::string
key2("the second key");
1249 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry
));
1251 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(size
));
1252 CacheTestFillBuffer(buffer
->data(), size
, false);
1254 for (int i
= 0; i
< 15; i
++) {
1255 EXPECT_EQ(0, WriteData(entry
, 0, 0, buffer
.get(), 0, true));
1256 EXPECT_EQ(size
, WriteData(entry
, 0, 0, buffer
.get(), size
, false));
1258 ASSERT_EQ(net::OK
, OpenEntry(key2
, &entry
));
1262 ASSERT_EQ(net::OK
, OpenEntry(key1
, &entry
)) << "have not evicted this entry";
1266 TEST_F(DiskCacheEntryTest
, ReuseExternalEntry
) {
1267 SetMaxSize(200 * 1024);
1269 ReuseEntry(20 * 1024);
1272 TEST_F(DiskCacheEntryTest
, MemoryOnlyReuseExternalEntry
) {
1273 SetMemoryOnlyMode();
1274 SetMaxSize(200 * 1024);
1276 ReuseEntry(20 * 1024);
1279 TEST_F(DiskCacheEntryTest
, ReuseInternalEntry
) {
1280 SetMaxSize(100 * 1024);
1282 ReuseEntry(10 * 1024);
1285 TEST_F(DiskCacheEntryTest
, MemoryOnlyReuseInternalEntry
) {
1286 SetMemoryOnlyMode();
1287 SetMaxSize(100 * 1024);
1289 ReuseEntry(10 * 1024);
1292 // Reading somewhere that was not written should return zeros.
1293 void DiskCacheEntryTest::InvalidData() {
1294 std::string
key("the first key");
1295 disk_cache::Entry
* entry
;
1296 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1298 const int kSize1
= 20000;
1299 const int kSize2
= 20000;
1300 const int kSize3
= 20000;
1301 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
1302 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
1303 scoped_refptr
<net::IOBuffer
> buffer3(new net::IOBuffer(kSize3
));
1305 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
1306 memset(buffer2
->data(), 0, kSize2
);
1308 // Simple data grow:
1309 EXPECT_EQ(200, WriteData(entry
, 0, 400, buffer1
.get(), 200, false));
1310 EXPECT_EQ(600, entry
->GetDataSize(0));
1311 EXPECT_EQ(100, ReadData(entry
, 0, 300, buffer3
.get(), 100));
1312 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 100));
1314 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1316 // The entry is now on disk. Load it and extend it.
1317 EXPECT_EQ(200, WriteData(entry
, 0, 800, buffer1
.get(), 200, false));
1318 EXPECT_EQ(1000, entry
->GetDataSize(0));
1319 EXPECT_EQ(100, ReadData(entry
, 0, 700, buffer3
.get(), 100));
1320 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 100));
1322 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1324 // This time using truncate.
1325 EXPECT_EQ(200, WriteData(entry
, 0, 1800, buffer1
.get(), 200, true));
1326 EXPECT_EQ(2000, entry
->GetDataSize(0));
1327 EXPECT_EQ(100, ReadData(entry
, 0, 1500, buffer3
.get(), 100));
1328 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 100));
1330 // Go to an external file.
1331 EXPECT_EQ(200, WriteData(entry
, 0, 19800, buffer1
.get(), 200, false));
1332 EXPECT_EQ(20000, entry
->GetDataSize(0));
1333 EXPECT_EQ(4000, ReadData(entry
, 0, 14000, buffer3
.get(), 4000));
1334 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 4000));
1336 // And back to an internal block.
1337 EXPECT_EQ(600, WriteData(entry
, 0, 1000, buffer1
.get(), 600, true));
1338 EXPECT_EQ(1600, entry
->GetDataSize(0));
1339 EXPECT_EQ(600, ReadData(entry
, 0, 1000, buffer3
.get(), 600));
1340 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer1
->data(), 600));
1343 EXPECT_EQ(600, WriteData(entry
, 0, 2000, buffer1
.get(), 600, false));
1344 EXPECT_EQ(2600, entry
->GetDataSize(0));
1345 EXPECT_EQ(200, ReadData(entry
, 0, 1800, buffer3
.get(), 200));
1346 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 200));
1348 // And again (with truncation flag).
1349 EXPECT_EQ(600, WriteData(entry
, 0, 3000, buffer1
.get(), 600, true));
1350 EXPECT_EQ(3600, entry
->GetDataSize(0));
1351 EXPECT_EQ(200, ReadData(entry
, 0, 2800, buffer3
.get(), 200));
1352 EXPECT_TRUE(!memcmp(buffer3
->data(), buffer2
->data(), 200));
1357 TEST_F(DiskCacheEntryTest
, InvalidData
) {
1362 TEST_F(DiskCacheEntryTest
, InvalidDataNoBuffer
) {
1364 cache_impl_
->SetFlags(disk_cache::kNoBuffering
);
1368 TEST_F(DiskCacheEntryTest
, MemoryOnlyInvalidData
) {
1369 SetMemoryOnlyMode();
1374 // Tests that the cache preserves the buffer of an IO operation.
1375 void DiskCacheEntryTest::ReadWriteDestroyBuffer() {
1376 std::string
key("the first key");
1377 disk_cache::Entry
* entry
;
1378 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1380 const int kSize
= 200;
1381 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1382 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1384 net::TestCompletionCallback cb
;
1385 EXPECT_EQ(net::ERR_IO_PENDING
,
1386 entry
->WriteData(0, 0, buffer
.get(), kSize
, cb
.callback(), false));
1388 // Release our reference to the buffer.
1390 EXPECT_EQ(kSize
, cb
.WaitForResult());
1392 // And now test with a Read().
1393 buffer
= new net::IOBuffer(kSize
);
1394 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1396 EXPECT_EQ(net::ERR_IO_PENDING
,
1397 entry
->ReadData(0, 0, buffer
.get(), kSize
, cb
.callback()));
1399 EXPECT_EQ(kSize
, cb
.WaitForResult());
1404 TEST_F(DiskCacheEntryTest
, ReadWriteDestroyBuffer
) {
1406 ReadWriteDestroyBuffer();
1409 void DiskCacheEntryTest::DoomNormalEntry() {
1410 std::string
key("the first key");
1411 disk_cache::Entry
* entry
;
1412 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1416 const int kSize
= 20000;
1417 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1418 CacheTestFillBuffer(buffer
->data(), kSize
, true);
1419 buffer
->data()[19999] = '\0';
1421 key
= buffer
->data();
1422 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1423 EXPECT_EQ(20000, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1424 EXPECT_EQ(20000, WriteData(entry
, 1, 0, buffer
.get(), kSize
, false));
1428 FlushQueueForTest();
1429 EXPECT_EQ(0, cache_
->GetEntryCount());
1432 TEST_F(DiskCacheEntryTest
, DoomEntry
) {
1437 TEST_F(DiskCacheEntryTest
, MemoryOnlyDoomEntry
) {
1438 SetMemoryOnlyMode();
1443 // Tests dooming an entry that's linked to an open entry.
1444 void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
1445 disk_cache::Entry
* entry1
;
1446 disk_cache::Entry
* entry2
;
1447 ASSERT_EQ(net::OK
, CreateEntry("fixed", &entry1
));
1449 ASSERT_EQ(net::OK
, CreateEntry("foo", &entry1
));
1451 ASSERT_EQ(net::OK
, CreateEntry("bar", &entry1
));
1454 ASSERT_EQ(net::OK
, OpenEntry("foo", &entry1
));
1455 ASSERT_EQ(net::OK
, OpenEntry("bar", &entry2
));
1459 ASSERT_EQ(net::OK
, OpenEntry("foo", &entry2
));
1464 ASSERT_EQ(net::OK
, OpenEntry("fixed", &entry1
));
1468 TEST_F(DiskCacheEntryTest
, DoomEntryNextToOpenEntry
) {
1470 DoomEntryNextToOpenEntry();
1473 TEST_F(DiskCacheEntryTest
, NewEvictionDoomEntryNextToOpenEntry
) {
1476 DoomEntryNextToOpenEntry();
1479 TEST_F(DiskCacheEntryTest
, AppCacheDoomEntryNextToOpenEntry
) {
1480 SetCacheType(net::APP_CACHE
);
1482 DoomEntryNextToOpenEntry();
1485 // Verify that basic operations work as expected with doomed entries.
1486 void DiskCacheEntryTest::DoomedEntry() {
1487 std::string
key("the first key");
1488 disk_cache::Entry
* entry
;
1489 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1492 FlushQueueForTest();
1493 EXPECT_EQ(0, cache_
->GetEntryCount());
1494 Time initial
= Time::Now();
1497 const int kSize1
= 2000;
1498 const int kSize2
= 2000;
1499 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
1500 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
1501 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
1502 memset(buffer2
->data(), 0, kSize2
);
1504 EXPECT_EQ(2000, WriteData(entry
, 0, 0, buffer1
.get(), 2000, false));
1505 EXPECT_EQ(2000, ReadData(entry
, 0, 0, buffer2
.get(), 2000));
1506 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer2
->data(), kSize1
));
1507 EXPECT_EQ(key
, entry
->GetKey());
1508 EXPECT_TRUE(initial
< entry
->GetLastModified());
1509 EXPECT_TRUE(initial
< entry
->GetLastUsed());
1514 TEST_F(DiskCacheEntryTest
, DoomedEntry
) {
1519 TEST_F(DiskCacheEntryTest
, MemoryOnlyDoomedEntry
) {
1520 SetMemoryOnlyMode();
1525 // Tests that we discard entries if the data is missing.
1526 TEST_F(DiskCacheEntryTest
, MissingData
) {
1529 std::string
key("the first key");
1530 disk_cache::Entry
* entry
;
1531 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1533 // Write to an external file.
1534 const int kSize
= 20000;
1535 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1536 CacheTestFillBuffer(buffer
->data(), kSize
, false);
1537 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1539 FlushQueueForTest();
1541 disk_cache::Addr
address(0x80000001);
1542 base::FilePath name
= cache_impl_
->GetFileName(address
);
1543 EXPECT_TRUE(base::DeleteFile(name
, false));
1545 // Attempt to read the data.
1546 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1547 EXPECT_EQ(net::ERR_FILE_NOT_FOUND
,
1548 ReadData(entry
, 0, 0, buffer
.get(), kSize
));
1551 // The entry should be gone.
1552 ASSERT_NE(net::OK
, OpenEntry(key
, &entry
));
1555 // Test that child entries in a memory cache backend are not visible from
1557 TEST_F(DiskCacheEntryTest
, MemoryOnlyEnumerationWithSparseEntries
) {
1558 SetMemoryOnlyMode();
1561 const int kSize
= 4096;
1562 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
1563 CacheTestFillBuffer(buf
->data(), kSize
, false);
1565 std::string
key("the first key");
1566 disk_cache::Entry
* parent_entry
;
1567 ASSERT_EQ(net::OK
, CreateEntry(key
, &parent_entry
));
1569 // Writes to the parent entry.
1571 parent_entry
->WriteSparseData(
1572 0, buf
.get(), kSize
, net::CompletionCallback()));
1574 // This write creates a child entry and writes to it.
1576 parent_entry
->WriteSparseData(
1577 8192, buf
.get(), kSize
, net::CompletionCallback()));
1579 parent_entry
->Close();
1581 // Perform the enumerations.
1583 disk_cache::Entry
* entry
= NULL
;
1585 while (OpenNextEntry(&iter
, &entry
) == net::OK
) {
1586 ASSERT_TRUE(entry
!= NULL
);
1588 disk_cache::MemEntryImpl
* mem_entry
=
1589 reinterpret_cast<disk_cache::MemEntryImpl
*>(entry
);
1590 EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry
, mem_entry
->type());
1593 EXPECT_EQ(1, count
);
1596 // Writes |buf_1| to offset and reads it back as |buf_2|.
1597 void VerifySparseIO(disk_cache::Entry
* entry
, int64 offset
,
1598 net::IOBuffer
* buf_1
, int size
, net::IOBuffer
* buf_2
) {
1599 net::TestCompletionCallback cb
;
1601 memset(buf_2
->data(), 0, size
);
1602 int ret
= entry
->ReadSparseData(offset
, buf_2
, size
, cb
.callback());
1603 EXPECT_EQ(0, cb
.GetResult(ret
));
1605 ret
= entry
->WriteSparseData(offset
, buf_1
, size
, cb
.callback());
1606 EXPECT_EQ(size
, cb
.GetResult(ret
));
1608 ret
= entry
->ReadSparseData(offset
, buf_2
, size
, cb
.callback());
1609 EXPECT_EQ(size
, cb
.GetResult(ret
));
1611 EXPECT_EQ(0, memcmp(buf_1
->data(), buf_2
->data(), size
));
1614 // Reads |size| bytes from |entry| at |offset| and verifies that they are the
1615 // same as the content of the provided |buffer|.
// NOTE(review): extraction artifact — original line 1617 (the tail of the
// signature, presumably "int size) {") and the function's closing brace are
// missing from this chunk; the body below uses |size| as a byte count.
// Confirm the full signature against the upstream file.
1616 void VerifyContentSparseIO(disk_cache::Entry
* entry
, int64 offset
, char* buffer
,
1618 net::TestCompletionCallback cb
;
// Read the sparse range into a zeroed scratch buffer.
1620 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(size
));
1621 memset(buf_1
->data(), 0, size
);
1622 int ret
= entry
->ReadSparseData(offset
, buf_1
.get(), size
, cb
.callback());
// The read must return exactly |size| bytes...
1623 EXPECT_EQ(size
, cb
.GetResult(ret
));
// ...and the bytes must match the expected |buffer| content.
1624 EXPECT_EQ(0, memcmp(buf_1
->data(), buffer
, size
));
1627 void DiskCacheEntryTest::BasicSparseIO() {
1628 std::string
key("the first key");
1629 disk_cache::Entry
* entry
;
1630 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1632 const int kSize
= 2048;
1633 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
1634 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
1635 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
1637 // Write at offset 0.
1638 VerifySparseIO(entry
, 0, buf_1
.get(), kSize
, buf_2
.get());
1640 // Write at offset 0x400000 (4 MB).
1641 VerifySparseIO(entry
, 0x400000, buf_1
.get(), kSize
, buf_2
.get());
1643 // Write at offset 0x800000000 (32 GB).
1644 VerifySparseIO(entry
, 0x800000000LL
, buf_1
.get(), kSize
, buf_2
.get());
1648 // Check everything again.
1649 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1650 VerifyContentSparseIO(entry
, 0, buf_1
->data(), kSize
);
1651 VerifyContentSparseIO(entry
, 0x400000, buf_1
->data(), kSize
);
1652 VerifyContentSparseIO(entry
, 0x800000000LL
, buf_1
->data(), kSize
);
1656 TEST_F(DiskCacheEntryTest
, BasicSparseIO
) {
1661 TEST_F(DiskCacheEntryTest
, MemoryOnlyBasicSparseIO
) {
1662 SetMemoryOnlyMode();
1667 void DiskCacheEntryTest::HugeSparseIO() {
1668 std::string
key("the first key");
1669 disk_cache::Entry
* entry
;
1670 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1672 // Write 1.2 MB so that we cover multiple entries.
1673 const int kSize
= 1200 * 1024;
1674 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
1675 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
1676 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
1678 // Write at offset 0x20F0000 (33 MB - 64 KB).
1679 VerifySparseIO(entry
, 0x20F0000, buf_1
.get(), kSize
, buf_2
.get());
1683 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1684 VerifyContentSparseIO(entry
, 0x20F0000, buf_1
->data(), kSize
);
1688 TEST_F(DiskCacheEntryTest
, HugeSparseIO
) {
1693 TEST_F(DiskCacheEntryTest
, MemoryOnlyHugeSparseIO
) {
1694 SetMemoryOnlyMode();
1699 void DiskCacheEntryTest::GetAvailableRange() {
1700 std::string
key("the first key");
1701 disk_cache::Entry
* entry
;
1702 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1704 const int kSize
= 16 * 1024;
1705 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
1706 CacheTestFillBuffer(buf
->data(), kSize
, false);
1708 // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
1709 EXPECT_EQ(kSize
, WriteSparseData(entry
, 0x20F0000, buf
.get(), kSize
));
1710 EXPECT_EQ(kSize
, WriteSparseData(entry
, 0x20F4400, buf
.get(), kSize
));
1712 // We stop at the first empty block.
1714 net::TestCompletionCallback cb
;
1715 int rv
= entry
->GetAvailableRange(
1716 0x20F0000, kSize
* 2, &start
, cb
.callback());
1717 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1718 EXPECT_EQ(0x20F0000, start
);
1721 rv
= entry
->GetAvailableRange(0, kSize
, &start
, cb
.callback());
1722 EXPECT_EQ(0, cb
.GetResult(rv
));
1723 rv
= entry
->GetAvailableRange(
1724 0x20F0000 - kSize
, kSize
, &start
, cb
.callback());
1725 EXPECT_EQ(0, cb
.GetResult(rv
));
1726 rv
= entry
->GetAvailableRange(0, 0x2100000, &start
, cb
.callback());
1727 EXPECT_EQ(kSize
, cb
.GetResult(rv
));
1728 EXPECT_EQ(0x20F0000, start
);
1730 // We should be able to Read based on the results of GetAvailableRange.
1732 rv
= entry
->GetAvailableRange(0x2100000, kSize
, &start
, cb
.callback());
1733 EXPECT_EQ(0, cb
.GetResult(rv
));
1734 rv
= entry
->ReadSparseData(start
, buf
.get(), kSize
, cb
.callback());
1735 EXPECT_EQ(0, cb
.GetResult(rv
));
1738 rv
= entry
->GetAvailableRange(0x20F2000, kSize
, &start
, cb
.callback());
1739 EXPECT_EQ(0x2000, cb
.GetResult(rv
));
1740 EXPECT_EQ(0x20F2000, start
);
1741 EXPECT_EQ(0x2000, ReadSparseData(entry
, start
, buf
.get(), kSize
));
1743 // Make sure that we respect the |len| argument.
1745 rv
= entry
->GetAvailableRange(
1746 0x20F0001 - kSize
, kSize
, &start
, cb
.callback());
1747 EXPECT_EQ(1, cb
.GetResult(rv
));
1748 EXPECT_EQ(0x20F0000, start
);
1753 TEST_F(DiskCacheEntryTest
, GetAvailableRange
) {
1755 GetAvailableRange();
1758 TEST_F(DiskCacheEntryTest
, MemoryOnlyGetAvailableRange
) {
1759 SetMemoryOnlyMode();
1761 GetAvailableRange();
1764 void DiskCacheEntryTest::CouldBeSparse() {
1765 std::string
key("the first key");
1766 disk_cache::Entry
* entry
;
1767 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1769 const int kSize
= 16 * 1024;
1770 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
1771 CacheTestFillBuffer(buf
->data(), kSize
, false);
1773 // Write at offset 0x20F0000 (33 MB - 64 KB).
1774 EXPECT_EQ(kSize
, WriteSparseData(entry
, 0x20F0000, buf
.get(), kSize
));
1776 EXPECT_TRUE(entry
->CouldBeSparse());
1779 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1780 EXPECT_TRUE(entry
->CouldBeSparse());
1783 // Now verify a regular entry.
1784 key
.assign("another key");
1785 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1786 EXPECT_FALSE(entry
->CouldBeSparse());
1788 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buf
.get(), kSize
, false));
1789 EXPECT_EQ(kSize
, WriteData(entry
, 1, 0, buf
.get(), kSize
, false));
1790 EXPECT_EQ(kSize
, WriteData(entry
, 2, 0, buf
.get(), kSize
, false));
1792 EXPECT_FALSE(entry
->CouldBeSparse());
1795 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1796 EXPECT_FALSE(entry
->CouldBeSparse());
1800 TEST_F(DiskCacheEntryTest
, CouldBeSparse
) {
1805 TEST_F(DiskCacheEntryTest
, MemoryCouldBeSparse
) {
1806 SetMemoryOnlyMode();
1811 TEST_F(DiskCacheEntryTest
, MemoryOnlyMisalignedSparseIO
) {
1812 SetMemoryOnlyMode();
1815 const int kSize
= 8192;
1816 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
1817 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
1818 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
1820 std::string
key("the first key");
1821 disk_cache::Entry
* entry
;
1822 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1824 // This loop writes back to back starting from offset 0 and 9000.
1825 for (int i
= 0; i
< kSize
; i
+= 1024) {
1826 scoped_refptr
<net::WrappedIOBuffer
> buf_3(
1827 new net::WrappedIOBuffer(buf_1
->data() + i
));
1828 VerifySparseIO(entry
, i
, buf_3
.get(), 1024, buf_2
.get());
1829 VerifySparseIO(entry
, 9000 + i
, buf_3
.get(), 1024, buf_2
.get());
1832 // Make sure we have data written.
1833 VerifyContentSparseIO(entry
, 0, buf_1
->data(), kSize
);
1834 VerifyContentSparseIO(entry
, 9000, buf_1
->data(), kSize
);
1836 // This tests a large write that spans 3 entries from a misaligned offset.
1837 VerifySparseIO(entry
, 20481, buf_1
.get(), 8192, buf_2
.get());
1842 TEST_F(DiskCacheEntryTest
, MemoryOnlyMisalignedGetAvailableRange
) {
1843 SetMemoryOnlyMode();
1846 const int kSize
= 8192;
1847 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
1848 CacheTestFillBuffer(buf
->data(), kSize
, false);
1850 disk_cache::Entry
* entry
;
1851 std::string
key("the first key");
1852 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1854 // Writes in the middle of an entry.
1857 entry
->WriteSparseData(0, buf
.get(), 1024, net::CompletionCallback()));
1860 entry
->WriteSparseData(5120, buf
.get(), 1024, net::CompletionCallback()));
1862 entry
->WriteSparseData(
1863 10000, buf
.get(), 1024, net::CompletionCallback()));
1865 // Writes in the middle of an entry and spans 2 child entries.
1867 entry
->WriteSparseData(
1868 50000, buf
.get(), 8192, net::CompletionCallback()));
1871 net::TestCompletionCallback cb
;
1872 // Test that we stop at a discontinuous child at the second block.
1873 int rv
= entry
->GetAvailableRange(0, 10000, &start
, cb
.callback());
1874 EXPECT_EQ(1024, cb
.GetResult(rv
));
1875 EXPECT_EQ(0, start
);
1877 // Test that number of bytes is reported correctly when we start from the
1878 // middle of a filled region.
1879 rv
= entry
->GetAvailableRange(512, 10000, &start
, cb
.callback());
1880 EXPECT_EQ(512, cb
.GetResult(rv
));
1881 EXPECT_EQ(512, start
);
1883 // Test that we found bytes in the child of next block.
1884 rv
= entry
->GetAvailableRange(1024, 10000, &start
, cb
.callback());
1885 EXPECT_EQ(1024, cb
.GetResult(rv
));
1886 EXPECT_EQ(5120, start
);
1888 // Test that the desired length is respected. It starts within a filled
1890 rv
= entry
->GetAvailableRange(5500, 512, &start
, cb
.callback());
1891 EXPECT_EQ(512, cb
.GetResult(rv
));
1892 EXPECT_EQ(5500, start
);
1894 // Test that the desired length is respected. It starts before a filled
1896 rv
= entry
->GetAvailableRange(5000, 620, &start
, cb
.callback());
1897 EXPECT_EQ(500, cb
.GetResult(rv
));
1898 EXPECT_EQ(5120, start
);
1900 // Test that multiple blocks are scanned.
1901 rv
= entry
->GetAvailableRange(40000, 20000, &start
, cb
.callback());
1902 EXPECT_EQ(8192, cb
.GetResult(rv
));
1903 EXPECT_EQ(50000, start
);
1908 void DiskCacheEntryTest::UpdateSparseEntry() {
1909 std::string
key("the first key");
1910 disk_cache::Entry
* entry1
;
1911 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry1
));
1913 const int kSize
= 2048;
1914 scoped_refptr
<net::IOBuffer
> buf_1(new net::IOBuffer(kSize
));
1915 scoped_refptr
<net::IOBuffer
> buf_2(new net::IOBuffer(kSize
));
1916 CacheTestFillBuffer(buf_1
->data(), kSize
, false);
1918 // Write at offset 0.
1919 VerifySparseIO(entry1
, 0, buf_1
.get(), kSize
, buf_2
.get());
1922 // Write at offset 2048.
1923 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry1
));
1924 VerifySparseIO(entry1
, 2048, buf_1
.get(), kSize
, buf_2
.get());
1926 disk_cache::Entry
* entry2
;
1927 ASSERT_EQ(net::OK
, CreateEntry("the second key", &entry2
));
1931 FlushQueueForTest();
1933 EXPECT_EQ(2, cache_
->GetEntryCount());
1935 EXPECT_EQ(3, cache_
->GetEntryCount());
1938 TEST_F(DiskCacheEntryTest
, UpdateSparseEntry
) {
1939 SetCacheType(net::MEDIA_CACHE
);
1941 UpdateSparseEntry();
1944 TEST_F(DiskCacheEntryTest
, MemoryOnlyUpdateSparseEntry
) {
1945 SetMemoryOnlyMode();
1946 SetCacheType(net::MEDIA_CACHE
);
1948 UpdateSparseEntry();
1951 void DiskCacheEntryTest::DoomSparseEntry() {
1952 std::string
key1("the first key");
1953 std::string
key2("the second key");
1954 disk_cache::Entry
*entry1
, *entry2
;
1955 ASSERT_EQ(net::OK
, CreateEntry(key1
, &entry1
));
1956 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry2
));
1958 const int kSize
= 4 * 1024;
1959 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
1960 CacheTestFillBuffer(buf
->data(), kSize
, false);
1962 int64 offset
= 1024;
1963 // Write to a bunch of ranges.
1964 for (int i
= 0; i
< 12; i
++) {
1966 entry1
->WriteSparseData(
1967 offset
, buf
.get(), kSize
, net::CompletionCallback()));
1968 // Keep the second map under the default size.
1971 entry2
->WriteSparseData(
1972 offset
, buf
.get(), kSize
, net::CompletionCallback()));
1979 EXPECT_EQ(2, cache_
->GetEntryCount());
1981 EXPECT_EQ(15, cache_
->GetEntryCount());
1983 // Doom the first entry while it's still open.
1988 // Doom the second entry after it's fully saved.
1989 EXPECT_EQ(net::OK
, DoomEntry(key2
));
1991 // Make sure we do all needed work. This may fail for entry2 if between Close
1992 // and DoomEntry the system decides to remove all traces of the file from the
1993 // system cache so we don't see that there is pending IO.
1994 base::MessageLoop::current()->RunUntilIdle();
1997 EXPECT_EQ(0, cache_
->GetEntryCount());
1999 if (5 == cache_
->GetEntryCount()) {
2000 // Most likely we are waiting for the result of reading the sparse info
2001 // (it's always async on Posix so it is easy to miss). Unfortunately we
2002 // don't have any signal to watch for so we can only wait.
2003 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
2004 base::MessageLoop::current()->RunUntilIdle();
2006 EXPECT_EQ(0, cache_
->GetEntryCount());
2010 TEST_F(DiskCacheEntryTest
, DoomSparseEntry
) {
2016 TEST_F(DiskCacheEntryTest
, MemoryOnlyDoomSparseEntry
) {
2017 SetMemoryOnlyMode();
2022 // A CompletionCallback wrapper that deletes the cache from within the callback.
2023 // The way a CompletionCallback works means that all tasks (even new ones)
2024 // are executed by the message loop before returning to the caller so the only
2025 // way to simulate a race is to execute what we want on the callback.
// NOTE(review): extraction artifact — gaps in the fused original line
// numbering (2027, 2030-2032, 2034, 2036-2037, 2040-2041) show that the
// access specifiers, the constructor's closing brace, the statement inside
// SetResult() that destroys the owned backend (presumably "cache_.reset();")
// and the class's closing "};" were lost from this chunk; confirm against
// the upstream file.
2026 class SparseTestCompletionCallback
: public net::TestCompletionCallback
{
// Takes ownership of the backend so it can be destroyed mid-callback.
2028 explicit SparseTestCompletionCallback(scoped_ptr
<disk_cache::Backend
> cache
)
2029 : cache_(cache
.Pass()) {
// Overridden so cache teardown can run before the result is recorded.
2033 virtual void SetResult(int result
) OVERRIDE
{
2035 TestCompletionCallback::SetResult(result
);
// Owned backend; intended to be deleted from within the callback.
2038 scoped_ptr
<disk_cache::Backend
> cache_
;
2039 DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback
);
2042 // Tests that we don't crash when the backend is deleted while we are working
2043 // deleting the sub-entries of a sparse entry.
2044 TEST_F(DiskCacheEntryTest
, DoomSparseEntry2
) {
2047 std::string
key("the key");
2048 disk_cache::Entry
* entry
;
2049 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2051 const int kSize
= 4 * 1024;
2052 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
2053 CacheTestFillBuffer(buf
->data(), kSize
, false);
2055 int64 offset
= 1024;
2056 // Write to a bunch of ranges.
2057 for (int i
= 0; i
< 12; i
++) {
2059 entry
->WriteSparseData(
2060 offset
, buf
.get(), kSize
, net::CompletionCallback()));
2063 EXPECT_EQ(9, cache_
->GetEntryCount());
2066 disk_cache::Backend
* cache
= cache_
.get();
2067 SparseTestCompletionCallback
cb(cache_
.Pass());
2068 int rv
= cache
->DoomEntry(key
, cb
.callback());
2069 EXPECT_EQ(net::ERR_IO_PENDING
, rv
);
2070 EXPECT_EQ(net::OK
, cb
.WaitForResult());
2073 void DiskCacheEntryTest::PartialSparseEntry() {
2074 std::string
key("the first key");
2075 disk_cache::Entry
* entry
;
2076 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2078 // We should be able to deal with IO that is not aligned to the block size
2079 // of a sparse entry, at least to write a big range without leaving holes.
2080 const int kSize
= 4 * 1024;
2081 const int kSmallSize
= 128;
2082 scoped_refptr
<net::IOBuffer
> buf1(new net::IOBuffer(kSize
));
2083 CacheTestFillBuffer(buf1
->data(), kSize
, false);
2085 // The first write is just to extend the entry. The third write occupies
2086 // a 1KB block partially, it may not be written internally depending on the
2088 EXPECT_EQ(kSize
, WriteSparseData(entry
, 20000, buf1
.get(), kSize
));
2089 EXPECT_EQ(kSize
, WriteSparseData(entry
, 500, buf1
.get(), kSize
));
2090 EXPECT_EQ(kSmallSize
,
2091 WriteSparseData(entry
, 1080321, buf1
.get(), kSmallSize
));
2093 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2095 scoped_refptr
<net::IOBuffer
> buf2(new net::IOBuffer(kSize
));
2096 memset(buf2
->data(), 0, kSize
);
2097 EXPECT_EQ(0, ReadSparseData(entry
, 8000, buf2
.get(), kSize
));
2099 EXPECT_EQ(500, ReadSparseData(entry
, kSize
, buf2
.get(), kSize
));
2100 EXPECT_EQ(0, memcmp(buf2
->data(), buf1
->data() + kSize
- 500, 500));
2101 EXPECT_EQ(0, ReadSparseData(entry
, 0, buf2
.get(), kSize
));
2103 // This read should not change anything.
2104 EXPECT_EQ(96, ReadSparseData(entry
, 24000, buf2
.get(), kSize
));
2105 EXPECT_EQ(500, ReadSparseData(entry
, kSize
, buf2
.get(), kSize
));
2106 EXPECT_EQ(0, ReadSparseData(entry
, 99, buf2
.get(), kSize
));
2110 net::TestCompletionCallback cb
;
2112 rv
= entry
->GetAvailableRange(0, 600, &start
, cb
.callback());
2113 EXPECT_EQ(100, cb
.GetResult(rv
));
2114 EXPECT_EQ(500, start
);
2116 rv
= entry
->GetAvailableRange(0, 2048, &start
, cb
.callback());
2117 EXPECT_EQ(1024, cb
.GetResult(rv
));
2118 EXPECT_EQ(1024, start
);
2120 rv
= entry
->GetAvailableRange(kSize
, kSize
, &start
, cb
.callback());
2121 EXPECT_EQ(500, cb
.GetResult(rv
));
2122 EXPECT_EQ(kSize
, start
);
2123 rv
= entry
->GetAvailableRange(20 * 1024, 10000, &start
, cb
.callback());
2124 EXPECT_EQ(3616, cb
.GetResult(rv
));
2125 EXPECT_EQ(20 * 1024, start
);
2127 // 1. Query before a filled 1KB block.
2128 // 2. Query within a filled 1KB block.
2129 // 3. Query beyond a filled 1KB block.
2131 rv
= entry
->GetAvailableRange(19400, kSize
, &start
, cb
.callback());
2132 EXPECT_EQ(3496, cb
.GetResult(rv
));
2133 EXPECT_EQ(20000, start
);
2135 rv
= entry
->GetAvailableRange(19400, kSize
, &start
, cb
.callback());
2136 EXPECT_EQ(3016, cb
.GetResult(rv
));
2137 EXPECT_EQ(20480, start
);
2139 rv
= entry
->GetAvailableRange(3073, kSize
, &start
, cb
.callback());
2140 EXPECT_EQ(1523, cb
.GetResult(rv
));
2141 EXPECT_EQ(3073, start
);
2142 rv
= entry
->GetAvailableRange(4600, kSize
, &start
, cb
.callback());
2143 EXPECT_EQ(0, cb
.GetResult(rv
));
2144 EXPECT_EQ(4600, start
);
2146 // Now make another write and verify that there is no hole in between.
2147 EXPECT_EQ(kSize
, WriteSparseData(entry
, 500 + kSize
, buf1
.get(), kSize
));
2148 rv
= entry
->GetAvailableRange(1024, 10000, &start
, cb
.callback());
2149 EXPECT_EQ(7 * 1024 + 500, cb
.GetResult(rv
));
2150 EXPECT_EQ(1024, start
);
2151 EXPECT_EQ(kSize
, ReadSparseData(entry
, kSize
, buf2
.get(), kSize
));
2152 EXPECT_EQ(0, memcmp(buf2
->data(), buf1
->data() + kSize
- 500, 500));
2153 EXPECT_EQ(0, memcmp(buf2
->data() + 500, buf1
->data(), kSize
- 500));
2158 TEST_F(DiskCacheEntryTest
, PartialSparseEntry
) {
2160 PartialSparseEntry();
2163 TEST_F(DiskCacheEntryTest
, MemoryPartialSparseEntry
) {
2164 SetMemoryOnlyMode();
2166 PartialSparseEntry();
2169 // Tests that corrupt sparse children are removed automatically.
2170 TEST_F(DiskCacheEntryTest
, CleanupSparseEntry
) {
2172 std::string
key("the first key");
2173 disk_cache::Entry
* entry
;
2174 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2176 const int kSize
= 4 * 1024;
2177 scoped_refptr
<net::IOBuffer
> buf1(new net::IOBuffer(kSize
));
2178 CacheTestFillBuffer(buf1
->data(), kSize
, false);
2180 const int k1Meg
= 1024 * 1024;
2181 EXPECT_EQ(kSize
, WriteSparseData(entry
, 8192, buf1
.get(), kSize
));
2182 EXPECT_EQ(kSize
, WriteSparseData(entry
, k1Meg
+ 8192, buf1
.get(), kSize
));
2183 EXPECT_EQ(kSize
, WriteSparseData(entry
, 2 * k1Meg
+ 8192, buf1
.get(), kSize
));
2185 EXPECT_EQ(4, cache_
->GetEntryCount());
2189 std::string child_key
[2];
2190 while (OpenNextEntry(&iter
, &entry
) == net::OK
) {
2191 ASSERT_TRUE(entry
!= NULL
);
2192 // Writing to an entry will alter the LRU list and invalidate the iterator.
2193 if (entry
->GetKey() != key
&& count
< 2)
2194 child_key
[count
++] = entry
->GetKey();
2197 for (int i
= 0; i
< 2; i
++) {
2198 ASSERT_EQ(net::OK
, OpenEntry(child_key
[i
], &entry
));
2199 // Overwrite the header's magic and signature.
2200 EXPECT_EQ(12, WriteData(entry
, 2, 0, buf1
.get(), 12, false));
2204 EXPECT_EQ(4, cache_
->GetEntryCount());
2205 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2207 // Two children should be gone. One while reading and one while writing.
2208 EXPECT_EQ(0, ReadSparseData(entry
, 2 * k1Meg
+ 8192, buf1
.get(), kSize
));
2209 EXPECT_EQ(kSize
, WriteSparseData(entry
, k1Meg
+ 16384, buf1
.get(), kSize
));
2210 EXPECT_EQ(0, ReadSparseData(entry
, k1Meg
+ 8192, buf1
.get(), kSize
));
2212 // We never touched this one.
2213 EXPECT_EQ(kSize
, ReadSparseData(entry
, 8192, buf1
.get(), kSize
));
2216 // We re-created one of the corrupt children.
2217 EXPECT_EQ(3, cache_
->GetEntryCount());
2220 TEST_F(DiskCacheEntryTest
, CancelSparseIO
) {
2223 std::string
key("the first key");
2224 disk_cache::Entry
* entry
;
2225 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2227 const int kSize
= 40 * 1024;
2228 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kSize
));
2229 CacheTestFillBuffer(buf
->data(), kSize
, false);
2231 // This will open and write two "real" entries.
2232 net::TestCompletionCallback cb1
, cb2
, cb3
, cb4
, cb5
;
2233 int rv
= entry
->WriteSparseData(
2234 1024 * 1024 - 4096, buf
.get(), kSize
, cb1
.callback());
2235 EXPECT_EQ(net::ERR_IO_PENDING
, rv
);
2238 rv
= entry
->GetAvailableRange(offset
, kSize
, &offset
, cb5
.callback());
2239 rv
= cb5
.GetResult(rv
);
2240 if (!cb1
.have_result()) {
2241 // We may or may not have finished writing to the entry. If we have not,
2242 // we cannot start another operation at this time.
2243 EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED
, rv
);
2246 // We cancel the pending operation, and register multiple notifications.
2247 entry
->CancelSparseIO();
2248 EXPECT_EQ(net::ERR_IO_PENDING
, entry
->ReadyForSparseIO(cb2
.callback()));
2249 EXPECT_EQ(net::ERR_IO_PENDING
, entry
->ReadyForSparseIO(cb3
.callback()));
2250 entry
->CancelSparseIO(); // Should be a no op at this point.
2251 EXPECT_EQ(net::ERR_IO_PENDING
, entry
->ReadyForSparseIO(cb4
.callback()));
2253 if (!cb1
.have_result()) {
2254 EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED
,
2255 entry
->ReadSparseData(
2256 offset
, buf
.get(), kSize
, net::CompletionCallback()));
2257 EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED
,
2258 entry
->WriteSparseData(
2259 offset
, buf
.get(), kSize
, net::CompletionCallback()));
2262 // Now see if we receive all notifications. Note that we should not be able
2263 // to write everything (unless the timing of the system is really weird).
2264 rv
= cb1
.WaitForResult();
2265 EXPECT_TRUE(rv
== 4096 || rv
== kSize
);
2266 EXPECT_EQ(net::OK
, cb2
.WaitForResult());
2267 EXPECT_EQ(net::OK
, cb3
.WaitForResult());
2268 EXPECT_EQ(net::OK
, cb4
.WaitForResult());
2270 rv
= entry
->GetAvailableRange(offset
, kSize
, &offset
, cb5
.callback());
2271 EXPECT_EQ(0, cb5
.GetResult(rv
));
2275 // Tests that we perform sanity checks on an entry's key. Note that there are
2276 // other tests that exercise sanity checks by using saved corrupt files.
2277 TEST_F(DiskCacheEntryTest
, KeySanityCheck
) {
2280 std::string
key("the first key");
2281 disk_cache::Entry
* entry
;
2282 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2284 disk_cache::EntryImpl
* entry_impl
=
2285 static_cast<disk_cache::EntryImpl
*>(entry
);
2286 disk_cache::EntryStore
* store
= entry_impl
->entry()->Data();
2288 // We have reserved space for a short key (one block), let's say that the key
2289 // takes more than one block, and remove the NULLs after the actual key.
2290 store
->key_len
= 800;
2291 memset(store
->key
+ key
.size(), 'k', sizeof(store
->key
) - key
.size());
2292 entry_impl
->entry()->set_modified();
2295 // We have a corrupt entry. Now reload it. We should NOT read beyond the
2296 // allocated buffer here.
2297 ASSERT_NE(net::OK
, OpenEntry(key
, &entry
));
2298 DisableIntegrityCheck();
2301 // The simple cache backend isn't intended to work on Windows, which has very
2302 // different file system guarantees from Linux.
2303 #if defined(OS_POSIX)
2305 TEST_F(DiskCacheEntryTest
, SimpleCacheInternalAsyncIO
) {
2306 SetSimpleCacheMode();
2311 TEST_F(DiskCacheEntryTest
, SimpleCacheExternalAsyncIO
) {
2312 SetSimpleCacheMode();
2317 TEST_F(DiskCacheEntryTest
, SimpleCacheReleaseBuffer
) {
2318 SetSimpleCacheMode();
2323 TEST_F(DiskCacheEntryTest
, SimpleCacheStreamAccess
) {
2324 SetSimpleCacheMode();
2329 TEST_F(DiskCacheEntryTest
, SimpleCacheGetKey
) {
2330 SetSimpleCacheMode();
2335 TEST_F(DiskCacheEntryTest
, SimpleCacheGetTimes
) {
2336 SetSimpleCacheMode();
2341 TEST_F(DiskCacheEntryTest
, SimpleCacheGrowData
) {
2342 SetSimpleCacheMode();
2347 TEST_F(DiskCacheEntryTest
, SimpleCacheTruncateData
) {
2348 SetSimpleCacheMode();
2353 TEST_F(DiskCacheEntryTest
, SimpleCacheZeroLengthIO
) {
2354 SetSimpleCacheMode();
2359 TEST_F(DiskCacheEntryTest
, SimpleCacheSizeAtCreate
) {
2360 SetSimpleCacheMode();
2365 TEST_F(DiskCacheEntryTest
, SimpleCacheReuseExternalEntry
) {
2366 SetSimpleCacheMode();
2367 SetMaxSize(200 * 1024);
2369 ReuseEntry(20 * 1024);
2372 TEST_F(DiskCacheEntryTest
, SimpleCacheReuseInternalEntry
) {
2373 SetSimpleCacheMode();
2374 SetMaxSize(100 * 1024);
2376 ReuseEntry(10 * 1024);
2379 TEST_F(DiskCacheEntryTest
, SimpleCacheSizeChanges
) {
2380 SetSimpleCacheMode();
2385 TEST_F(DiskCacheEntryTest
, SimpleCacheInvalidData
) {
2386 SetSimpleCacheMode();
2391 TEST_F(DiskCacheEntryTest
, SimpleCacheReadWriteDestroyBuffer
) {
2392 SetSimpleCacheMode();
2394 ReadWriteDestroyBuffer();
2397 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomEntry
) {
2398 SetSimpleCacheMode();
2403 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomEntryNextToOpenEntry
) {
2404 SetSimpleCacheMode();
2406 DoomEntryNextToOpenEntry();
2409 TEST_F(DiskCacheEntryTest
, SimpleCacheDoomedEntry
) {
2410 SetSimpleCacheMode();
2415 // Creates an entry with corrupted last byte in stream 0.
2416 // Requires SimpleCacheMode.
2417 bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const char* key
,
2419 disk_cache::Entry
* entry
= NULL
;
2421 if (CreateEntry(key
, &entry
) != net::OK
|| !entry
) {
2422 LOG(ERROR
) << "Could not create entry";
2426 const char data
[] = "this is very good data";
2427 const int kDataSize
= arraysize(data
);
2428 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kDataSize
));
2429 base::strlcpy(buffer
->data(), data
, kDataSize
);
2431 EXPECT_EQ(kDataSize
, WriteData(entry
, 0, 0, buffer
.get(), kDataSize
, false));
2435 // Corrupt the last byte of the data.
2436 base::FilePath entry_file0_path
= cache_path_
.AppendASCII(
2437 disk_cache::simple_util::GetFilenameFromKeyAndIndex(key
, 0));
2438 int flags
= base::PLATFORM_FILE_WRITE
| base::PLATFORM_FILE_OPEN
;
2439 base::PlatformFile entry_file0
=
2440 base::CreatePlatformFile(entry_file0_path
, flags
, NULL
, NULL
);
2441 if (entry_file0
== base::kInvalidPlatformFileValue
)
2444 disk_cache::simple_util::GetFileOffsetFromKeyAndDataOffset(
2445 key
, kDataSize
- 2);
2446 EXPECT_EQ(1, base::WritePlatformFile(entry_file0
, file_offset
, "X", 1));
2447 if (!base::ClosePlatformFile(entry_file0
))
2449 *data_size
= kDataSize
;
2453 // Tests that the simple cache can detect entries that have bad data.
2454 TEST_F(DiskCacheEntryTest
, SimpleCacheBadChecksum
) {
2455 SetSimpleCacheMode();
2458 const char key
[] = "the first key";
2460 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key
, &size_unused
));
2462 disk_cache::Entry
* entry
= NULL
;
2465 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2466 ScopedEntryPtr
entry_closer(entry
);
2468 const int kReadBufferSize
= 200;
2469 EXPECT_GE(kReadBufferSize
, entry
->GetDataSize(0));
2470 scoped_refptr
<net::IOBuffer
> read_buffer(new net::IOBuffer(kReadBufferSize
));
2471 EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH
,
2472 ReadData(entry
, 0, 0, read_buffer
.get(), kReadBufferSize
));
2475 // Tests that an entry that has had an IO error occur can still be Doomed().
2476 TEST_F(DiskCacheEntryTest
, SimpleCacheErrorThenDoom
) {
2477 SetSimpleCacheMode();
2480 const char key
[] = "the first key";
2482 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key
, &size_unused
));
2484 disk_cache::Entry
* entry
= NULL
;
2486 // Open the entry, forcing an IO error.
2487 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2488 ScopedEntryPtr
entry_closer(entry
);
2490 const int kReadBufferSize
= 200;
2491 EXPECT_GE(kReadBufferSize
, entry
->GetDataSize(0));
2492 scoped_refptr
<net::IOBuffer
> read_buffer(new net::IOBuffer(kReadBufferSize
));
2493 EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH
,
2494 ReadData(entry
, 0, 0, read_buffer
.get(), kReadBufferSize
));
2496 entry
->Doom(); // Should not crash.
2499 bool TruncatePath(const base::FilePath
& file_path
, int64 length
) {
2500 const int flags
= base::PLATFORM_FILE_WRITE
| base::PLATFORM_FILE_OPEN
;
2501 base::PlatformFile file
=
2502 base::CreatePlatformFile(file_path
, flags
, NULL
, NULL
);
2503 if (base::kInvalidPlatformFileValue
== file
)
2505 const bool result
= base::TruncatePlatformFile(file
, length
);
2506 base::ClosePlatformFile(file
);
2510 TEST_F(DiskCacheEntryTest
, SimpleCacheNoEOF
) {
2511 SetSimpleCacheMode();
2514 const char key
[] = "the first key";
2516 disk_cache::Entry
* entry
= NULL
;
2517 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
2518 disk_cache::Entry
* null
= NULL
;
2519 EXPECT_NE(null
, entry
);
2523 // Force the entry to flush to disk, so subsequent platform file operations
2525 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
2529 // Truncate the file such that the length isn't sufficient to have an EOF
2531 int kTruncationBytes
= -implicit_cast
<int>(sizeof(disk_cache::SimpleFileEOF
));
2532 const base::FilePath entry_path
= cache_path_
.AppendASCII(
2533 disk_cache::simple_util::GetFilenameFromKeyAndIndex(key
, 0));
2534 const int64 invalid_size
=
2535 disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key
,
2537 EXPECT_TRUE(TruncatePath(entry_path
, invalid_size
));
2538 EXPECT_EQ(net::ERR_FAILED
, OpenEntry(key
, &entry
));
2539 DisableIntegrityCheck();
2542 TEST_F(DiskCacheEntryTest
, SimpleCacheNonOptimisticOperationsBasic
) {
2544 // Create, Write, Read, Close.
2545 SetCacheType(net::APP_CACHE
); // APP_CACHE doesn't use optimistic operations.
2546 SetSimpleCacheMode();
2548 disk_cache::Entry
* const null_entry
= NULL
;
2550 disk_cache::Entry
* entry
= NULL
;
2551 EXPECT_EQ(net::OK
, CreateEntry("my key", &entry
));
2552 ASSERT_NE(null_entry
, entry
);
2553 ScopedEntryPtr
entry_closer(entry
);
2555 const int kBufferSize
= 10;
2556 scoped_refptr
<net::IOBufferWithSize
> write_buffer(
2557 new net::IOBufferWithSize(kBufferSize
));
2558 CacheTestFillBuffer(write_buffer
->data(), write_buffer
->size(), false);
2560 write_buffer
->size(),
2561 WriteData(entry
, 0, 0, write_buffer
.get(), write_buffer
->size(), false));
2563 scoped_refptr
<net::IOBufferWithSize
> read_buffer(
2564 new net::IOBufferWithSize(kBufferSize
));
2566 read_buffer
->size(),
2567 ReadData(entry
, 0, 0, read_buffer
.get(), read_buffer
->size()));
2570 TEST_F(DiskCacheEntryTest
, SimpleCacheNonOptimisticOperationsDontBlock
) {
2572 // Create, Write, Close.
2573 SetCacheType(net::APP_CACHE
); // APP_CACHE doesn't use optimistic operations.
2574 SetSimpleCacheMode();
2576 disk_cache::Entry
* const null_entry
= NULL
;
2578 MessageLoopHelper helper
;
2579 CallbackTest
create_callback(&helper
, false);
2581 int expected_callback_runs
= 0;
2582 const int kBufferSize
= 10;
2583 scoped_refptr
<net::IOBufferWithSize
> write_buffer(
2584 new net::IOBufferWithSize(kBufferSize
));
2586 disk_cache::Entry
* entry
= NULL
;
2587 EXPECT_EQ(net::OK
, CreateEntry("my key", &entry
));
2588 ASSERT_NE(null_entry
, entry
);
2589 ScopedEntryPtr
entry_closer(entry
);
2591 CacheTestFillBuffer(write_buffer
->data(), write_buffer
->size(), false);
2592 CallbackTest
write_callback(&helper
, false);
2593 int ret
= entry
->WriteData(
2597 write_buffer
->size(),
2598 base::Bind(&CallbackTest::Run
, base::Unretained(&write_callback
)),
2600 ASSERT_EQ(net::ERR_IO_PENDING
, ret
);
2601 helper
.WaitUntilCacheIoFinished(++expected_callback_runs
);
2604 TEST_F(DiskCacheEntryTest
,
2605 SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting
) {
2607 // Create, Write, Read, Close.
2608 SetCacheType(net::APP_CACHE
); // APP_CACHE doesn't use optimistic operations.
2609 SetSimpleCacheMode();
2611 disk_cache::Entry
* const null_entry
= NULL
;
2612 MessageLoopHelper helper
;
2614 disk_cache::Entry
* entry
= NULL
;
2615 // Note that |entry| is only set once CreateEntry() completed which is why we
2616 // have to wait (i.e. use the helper CreateEntry() function).
2617 EXPECT_EQ(net::OK
, CreateEntry("my key", &entry
));
2618 ASSERT_NE(null_entry
, entry
);
2619 ScopedEntryPtr
entry_closer(entry
);
2621 const int kBufferSize
= 10;
2622 scoped_refptr
<net::IOBufferWithSize
> write_buffer(
2623 new net::IOBufferWithSize(kBufferSize
));
2624 CacheTestFillBuffer(write_buffer
->data(), write_buffer
->size(), false);
2625 CallbackTest
write_callback(&helper
, false);
2626 int ret
= entry
->WriteData(
2630 write_buffer
->size(),
2631 base::Bind(&CallbackTest::Run
, base::Unretained(&write_callback
)),
2633 EXPECT_EQ(net::ERR_IO_PENDING
, ret
);
2634 int expected_callback_runs
= 1;
2636 scoped_refptr
<net::IOBufferWithSize
> read_buffer(
2637 new net::IOBufferWithSize(kBufferSize
));
2638 CallbackTest
read_callback(&helper
, false);
2639 ret
= entry
->ReadData(
2643 read_buffer
->size(),
2644 base::Bind(&CallbackTest::Run
, base::Unretained(&read_callback
)));
2645 EXPECT_EQ(net::ERR_IO_PENDING
, ret
);
2646 ++expected_callback_runs
;
2648 helper
.WaitUntilCacheIoFinished(expected_callback_runs
);
2649 ASSERT_EQ(read_buffer
->size(), write_buffer
->size());
2652 memcmp(read_buffer
->data(), write_buffer
->data(), read_buffer
->size()));
2655 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic
) {
2657 // Create, Write, Read, Write, Read, Close.
2658 SetSimpleCacheMode();
2660 disk_cache::Entry
* null
= NULL
;
2661 const char key
[] = "the first key";
2663 MessageLoopHelper helper
;
2664 CallbackTest
callback1(&helper
, false);
2665 CallbackTest
callback2(&helper
, false);
2666 CallbackTest
callback3(&helper
, false);
2667 CallbackTest
callback4(&helper
, false);
2668 CallbackTest
callback5(&helper
, false);
2671 const int kSize1
= 10;
2672 const int kSize2
= 20;
2673 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
2674 scoped_refptr
<net::IOBuffer
> buffer1_read(new net::IOBuffer(kSize1
));
2675 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize2
));
2676 scoped_refptr
<net::IOBuffer
> buffer2_read(new net::IOBuffer(kSize2
));
2677 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
2678 CacheTestFillBuffer(buffer2
->data(), kSize2
, false);
2680 disk_cache::Entry
* entry
= NULL
;
2681 // Create is optimistic, must return OK.
2683 cache_
->CreateEntry(key
, &entry
,
2684 base::Bind(&CallbackTest::Run
,
2685 base::Unretained(&callback1
))));
2686 EXPECT_NE(null
, entry
);
2687 ScopedEntryPtr
entry_closer(entry
);
2689 // This write may or may not be optimistic (it depends if the previous
2690 // optimistic create already finished by the time we call the write here).
2691 int ret
= entry
->WriteData(
2696 base::Bind(&CallbackTest::Run
, base::Unretained(&callback2
)),
2698 EXPECT_TRUE(kSize1
== ret
|| net::ERR_IO_PENDING
== ret
);
2699 if (net::ERR_IO_PENDING
== ret
)
2702 // This Read must not be optimistic, since we don't support that yet.
2703 EXPECT_EQ(net::ERR_IO_PENDING
,
2709 base::Bind(&CallbackTest::Run
, base::Unretained(&callback3
))));
2711 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
2712 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer1_read
->data(), kSize1
));
2714 // At this point after waiting, the pending operations queue on the entry
2715 // should be empty, so the next Write operation must run as optimistic.
2722 base::Bind(&CallbackTest::Run
, base::Unretained(&callback4
)),
2725 // Lets do another read so we block until both the write and the read
2726 // operation finishes and we can then test for HasOneRef() below.
2727 EXPECT_EQ(net::ERR_IO_PENDING
,
2733 base::Bind(&CallbackTest::Run
, base::Unretained(&callback5
))));
2736 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
2737 EXPECT_EQ(0, memcmp(buffer2
->data(), buffer2_read
->data(), kSize2
));
2739 // Check that we are not leaking.
2740 EXPECT_NE(entry
, null
);
2742 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
2745 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic2
) {
2747 // Create, Open, Close, Close.
2748 SetSimpleCacheMode();
2750 disk_cache::Entry
* null
= NULL
;
2751 const char key
[] = "the first key";
2753 MessageLoopHelper helper
;
2754 CallbackTest
callback1(&helper
, false);
2755 CallbackTest
callback2(&helper
, false);
2757 disk_cache::Entry
* entry
= NULL
;
2759 cache_
->CreateEntry(key
, &entry
,
2760 base::Bind(&CallbackTest::Run
,
2761 base::Unretained(&callback1
))));
2762 EXPECT_NE(null
, entry
);
2763 ScopedEntryPtr
entry_closer(entry
);
2765 disk_cache::Entry
* entry2
= NULL
;
2766 ASSERT_EQ(net::ERR_IO_PENDING
,
2767 cache_
->OpenEntry(key
, &entry2
,
2768 base::Bind(&CallbackTest::Run
,
2769 base::Unretained(&callback2
))));
2770 ASSERT_TRUE(helper
.WaitUntilCacheIoFinished(1));
2772 EXPECT_NE(null
, entry2
);
2773 EXPECT_EQ(entry
, entry2
);
2775 // We have to call close twice, since we called create and open above.
2778 // Check that we are not leaking.
2780 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
2783 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic3
) {
2785 // Create, Close, Open, Close.
2786 SetSimpleCacheMode();
2788 disk_cache::Entry
* null
= NULL
;
2789 const char key
[] = "the first key";
2791 disk_cache::Entry
* entry
= NULL
;
2793 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
2794 EXPECT_NE(null
, entry
);
2797 net::TestCompletionCallback cb
;
2798 disk_cache::Entry
* entry2
= NULL
;
2799 ASSERT_EQ(net::ERR_IO_PENDING
,
2800 cache_
->OpenEntry(key
, &entry2
, cb
.callback()));
2801 ASSERT_EQ(net::OK
, cb
.GetResult(net::ERR_IO_PENDING
));
2802 ScopedEntryPtr
entry_closer(entry2
);
2804 EXPECT_NE(null
, entry2
);
2805 EXPECT_EQ(entry
, entry2
);
2807 // Check that we are not leaking.
2809 static_cast<disk_cache::SimpleEntryImpl
*>(entry2
)->HasOneRef());
2812 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic4
) {
2814 // Create, Close, Write, Open, Open, Close, Write, Read, Close.
2815 SetSimpleCacheMode();
2817 disk_cache::Entry
* null
= NULL
;
2818 const char key
[] = "the first key";
2820 net::TestCompletionCallback cb
;
2821 const int kSize1
= 10;
2822 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
2823 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
2824 disk_cache::Entry
* entry
= NULL
;
2827 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
2828 EXPECT_NE(null
, entry
);
2831 // Lets do a Write so we block until both the Close and the Write
2832 // operation finishes. Write must fail since we are writing in a closed entry.
2834 net::ERR_IO_PENDING
,
2835 entry
->WriteData(0, 0, buffer1
.get(), kSize1
, cb
.callback(), false));
2836 EXPECT_EQ(net::ERR_FAILED
, cb
.GetResult(net::ERR_IO_PENDING
));
2838 // Finish running the pending tasks so that we fully complete the close
2839 // operation and destroy the entry object.
2840 base::MessageLoop::current()->RunUntilIdle();
2842 // At this point the |entry| must have been destroyed, and called
2843 // RemoveSelfFromBackend().
2844 disk_cache::Entry
* entry2
= NULL
;
2845 ASSERT_EQ(net::ERR_IO_PENDING
,
2846 cache_
->OpenEntry(key
, &entry2
, cb
.callback()));
2847 ASSERT_EQ(net::OK
, cb
.GetResult(net::ERR_IO_PENDING
));
2848 EXPECT_NE(null
, entry2
);
2850 disk_cache::Entry
* entry3
= NULL
;
2851 ASSERT_EQ(net::ERR_IO_PENDING
,
2852 cache_
->OpenEntry(key
, &entry3
, cb
.callback()));
2853 ASSERT_EQ(net::OK
, cb
.GetResult(net::ERR_IO_PENDING
));
2854 EXPECT_NE(null
, entry3
);
2855 EXPECT_EQ(entry2
, entry3
);
2858 // The previous Close doesn't actually closes the entry since we opened it
2859 // twice, so the next Write operation must succeed and it must be able to
2860 // perform it optimistically, since there is no operation running on this
2864 0, 0, buffer1
.get(), kSize1
, net::CompletionCallback(), false));
2866 // Lets do another read so we block until both the write and the read
2867 // operation finishes and we can then test for HasOneRef() below.
2868 EXPECT_EQ(net::ERR_IO_PENDING
,
2869 entry2
->ReadData(0, 0, buffer1
.get(), kSize1
, cb
.callback()));
2870 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
2872 // Check that we are not leaking.
2874 static_cast<disk_cache::SimpleEntryImpl
*>(entry2
)->HasOneRef());
2878 // This test is flaky because of the race of Create followed by a Doom.
2879 // See test SimpleCacheCreateDoomRace.
2880 TEST_F(DiskCacheEntryTest
, DISABLED_SimpleCacheOptimistic5
) {
2882 // Create, Doom, Write, Read, Close.
2883 SetSimpleCacheMode();
2885 disk_cache::Entry
* null
= NULL
;
2886 const char key
[] = "the first key";
2888 net::TestCompletionCallback cb
;
2889 const int kSize1
= 10;
2890 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
2891 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
2892 disk_cache::Entry
* entry
= NULL
;
2895 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
2896 EXPECT_NE(null
, entry
);
2897 ScopedEntryPtr
entry_closer(entry
);
2901 net::ERR_IO_PENDING
,
2902 entry
->WriteData(0, 0, buffer1
.get(), kSize1
, cb
.callback(), false));
2903 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
2905 EXPECT_EQ(net::ERR_IO_PENDING
,
2906 entry
->ReadData(0, 0, buffer1
.get(), kSize1
, cb
.callback()));
2907 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
2909 // Check that we are not leaking.
2911 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
2914 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimistic6
) {
2916 // Create, Write, Doom, Doom, Read, Doom, Close.
2917 SetSimpleCacheMode();
2919 disk_cache::Entry
* null
= NULL
;
2920 const char key
[] = "the first key";
2922 net::TestCompletionCallback cb
;
2923 const int kSize1
= 10;
2924 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
2925 scoped_refptr
<net::IOBuffer
> buffer1_read(new net::IOBuffer(kSize1
));
2926 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
2927 disk_cache::Entry
* entry
= NULL
;
2930 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
2931 EXPECT_NE(null
, entry
);
2932 ScopedEntryPtr
entry_closer(entry
);
2935 net::ERR_IO_PENDING
,
2936 entry
->WriteData(0, 0, buffer1
.get(), kSize1
, cb
.callback(), false));
2937 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
2942 // This Read must not be optimistic, since we don't support that yet.
2943 EXPECT_EQ(net::ERR_IO_PENDING
,
2944 entry
->ReadData(0, 0, buffer1_read
.get(), kSize1
, cb
.callback()));
2945 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
2946 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer1_read
->data(), kSize1
));
2950 // Check that we are not leaking.
2952 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
2955 // Confirm that IO buffers are not referenced by the Simple Cache after a write
2957 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimisticWriteReleases
) {
2958 SetSimpleCacheMode();
2961 const char key
[] = "the first key";
2962 disk_cache::Entry
* entry
= NULL
;
2964 // First, an optimistic create.
2966 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
2968 ScopedEntryPtr
entry_closer(entry
);
2970 const int kWriteSize
= 512;
2971 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kWriteSize
));
2972 EXPECT_TRUE(buffer1
->HasOneRef());
2973 CacheTestFillBuffer(buffer1
->data(), kWriteSize
, false);
2975 // An optimistic write happens only when there is an empty queue of pending
2976 // operations. To ensure the queue is empty, we issue a write and wait until
2978 EXPECT_EQ(kWriteSize
,
2979 WriteData(entry
, 0, 0, buffer1
.get(), kWriteSize
, false));
2980 EXPECT_TRUE(buffer1
->HasOneRef());
2982 // Finally, we should perform an optimistic write and confirm that all
2983 // references to the IO buffer have been released.
2987 1, 0, buffer1
.get(), kWriteSize
, net::CompletionCallback(), false));
2988 EXPECT_TRUE(buffer1
->HasOneRef());
2991 TEST_F(DiskCacheEntryTest
, DISABLED_SimpleCacheCreateDoomRace
) {
2993 // Create, Doom, Write, Close, Check files are not on disk anymore.
2994 SetSimpleCacheMode();
2996 disk_cache::Entry
* null
= NULL
;
2997 const char key
[] = "the first key";
2999 net::TestCompletionCallback cb
;
3000 const int kSize1
= 10;
3001 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize1
));
3002 CacheTestFillBuffer(buffer1
->data(), kSize1
, false);
3003 disk_cache::Entry
* entry
= NULL
;
3006 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
3007 EXPECT_NE(null
, entry
);
3009 cache_
->DoomEntry(key
, cb
.callback());
3010 EXPECT_EQ(net::OK
, cb
.GetResult(net::ERR_IO_PENDING
));
3012 // Lets do a Write so we block until all operations are done, so we can check
3013 // the HasOneRef() below. This call can't be optimistic and we are checking
3016 net::ERR_IO_PENDING
,
3017 entry
->WriteData(0, 0, buffer1
.get(), kSize1
, cb
.callback(), false));
3018 EXPECT_EQ(kSize1
, cb
.GetResult(net::ERR_IO_PENDING
));
3020 // Check that we are not leaking.
3022 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
3025 // Finish running the pending tasks so that we fully complete the close
3026 // operation and destroy the entry object.
3027 base::MessageLoop::current()->RunUntilIdle();
3029 for (int i
= 0; i
< disk_cache::kSimpleEntryFileCount
; ++i
) {
3030 base::FilePath entry_file_path
= cache_path_
.AppendASCII(
3031 disk_cache::simple_util::GetFilenameFromKeyAndIndex(key
, i
));
3032 base::PlatformFileInfo info
;
3033 EXPECT_FALSE(file_util::GetFileInfo(entry_file_path
, &info
));
3037 // Checks that an optimistic Create would fail later on a racing Open.
3038 TEST_F(DiskCacheEntryTest
, SimpleCacheOptimisticCreateFailsOnOpen
) {
3039 SetSimpleCacheMode();
3042 // Create a corrupt file in place of a future entry. Optimistic create should
3043 // initially succeed, but realize later that creation failed.
3044 const std::string key
= "the key";
3045 net::TestCompletionCallback cb
;
3046 disk_cache::Entry
* entry
= NULL
;
3047 disk_cache::Entry
* entry2
= NULL
;
3049 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3051 EXPECT_EQ(net::OK
, cache_
->CreateEntry(key
, &entry
, cb
.callback()));
3053 ScopedEntryPtr
entry_closer(entry
);
3054 ASSERT_NE(net::OK
, OpenEntry(key
, &entry2
));
3056 // Check that we are not leaking.
3058 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
3060 DisableIntegrityCheck();
3063 // Tests that old entries are evicted while new entries remain in the index.
3064 // This test relies on non-mandatory properties of the simple Cache Backend:
3065 // LRU eviction, specific values of high-watermark and low-watermark etc.
3066 // When changing the eviction algorithm, the test will have to be re-engineered.
3067 TEST_F(DiskCacheEntryTest
, SimpleCacheEvictOldEntries
) {
3068 const int kMaxSize
= 200 * 1024;
3069 const int kWriteSize
= kMaxSize
/ 10;
3070 const int kNumExtraEntries
= 12;
3071 SetSimpleCacheMode();
3072 SetMaxSize(kMaxSize
);
3075 std::string
key1("the first key");
3076 disk_cache::Entry
* entry
;
3077 ASSERT_EQ(net::OK
, CreateEntry(key1
, &entry
));
3078 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kWriteSize
));
3079 CacheTestFillBuffer(buffer
->data(), kWriteSize
, false);
3080 EXPECT_EQ(kWriteSize
,
3081 WriteData(entry
, 0, 0, buffer
.get(), kWriteSize
, false));
3084 std::string
key2("the key prefix");
3085 for (int i
= 0; i
< kNumExtraEntries
; i
++) {
3086 ASSERT_EQ(net::OK
, CreateEntry(key2
+ base::StringPrintf("%d", i
), &entry
));
3087 ScopedEntryPtr
entry_closer(entry
);
3088 EXPECT_EQ(kWriteSize
,
3089 WriteData(entry
, 0, 0, buffer
.get(), kWriteSize
, false));
3092 // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
3093 // the internal knowledge about |SimpleBackendImpl|.
3094 ASSERT_NE(net::OK
, OpenEntry(key1
, &entry
))
3095 << "Should have evicted the old entry";
3096 for (int i
= 0; i
< 2; i
++) {
3097 int entry_no
= kNumExtraEntries
- i
- 1;
3098 // Generally there is no guarantee that at this point the backround eviction
3099 // is finished. We are testing the positive case, i.e. when the eviction
3100 // never reaches this entry, should be non-flaky.
3101 ASSERT_EQ(net::OK
, OpenEntry(key2
+ base::StringPrintf("%d", entry_no
),
3103 << "Should not have evicted fresh entry " << entry_no
;
3108 // Tests that if a read and a following in-flight truncate are both in progress
3109 // simultaniously that they both can occur successfully. See
3110 // http://crbug.com/239223
3111 TEST_F(DiskCacheEntryTest
, SimpleCacheInFlightTruncate
) {
3112 SetSimpleCacheMode();
3115 const char key
[] = "the first key";
3117 const int kBufferSize
= 1024;
3118 scoped_refptr
<net::IOBuffer
> write_buffer(new net::IOBuffer(kBufferSize
));
3119 CacheTestFillBuffer(write_buffer
->data(), kBufferSize
, false);
3121 disk_cache::Entry
* entry
= NULL
;
3122 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3124 EXPECT_EQ(kBufferSize
,
3125 WriteData(entry
, 0, 0, write_buffer
.get(), kBufferSize
, false));
3129 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3130 ScopedEntryPtr
entry_closer(entry
);
3132 MessageLoopHelper helper
;
3135 // Make a short read.
3136 const int kReadBufferSize
= 512;
3137 scoped_refptr
<net::IOBuffer
> read_buffer(new net::IOBuffer(kReadBufferSize
));
3138 CallbackTest
read_callback(&helper
, false);
3139 EXPECT_EQ(net::ERR_IO_PENDING
,
3144 base::Bind(&CallbackTest::Run
,
3145 base::Unretained(&read_callback
))));
3148 // Truncate the entry to the length of that read.
3149 scoped_refptr
<net::IOBuffer
>
3150 truncate_buffer(new net::IOBuffer(kReadBufferSize
));
3151 CacheTestFillBuffer(truncate_buffer
->data(), kReadBufferSize
, false);
3152 CallbackTest
truncate_callback(&helper
, false);
3153 EXPECT_EQ(net::ERR_IO_PENDING
,
3156 truncate_buffer
.get(),
3158 base::Bind(&CallbackTest::Run
,
3159 base::Unretained(&truncate_callback
)),
3163 // Wait for both the read and truncation to finish, and confirm that both
3165 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
3166 EXPECT_EQ(kReadBufferSize
, read_callback
.last_result());
3167 EXPECT_EQ(kReadBufferSize
, truncate_callback
.last_result());
3169 memcmp(write_buffer
->data(), read_buffer
->data(), kReadBufferSize
));
3172 // Tests that if a write and a read dependant on it are both in flight
3173 // simultaneiously that they both can complete successfully without erroneous
3174 // early returns. See http://crbug.com/239223
3175 TEST_F(DiskCacheEntryTest
, SimpleCacheInFlightRead
) {
3176 SetSimpleCacheMode();
3179 const char key
[] = "the first key";
3180 disk_cache::Entry
* entry
= NULL
;
3182 cache_
->CreateEntry(key
, &entry
, net::CompletionCallback()));
3183 ScopedEntryPtr
entry_closer(entry
);
3185 const int kBufferSize
= 1024;
3186 scoped_refptr
<net::IOBuffer
> write_buffer(new net::IOBuffer(kBufferSize
));
3187 CacheTestFillBuffer(write_buffer
->data(), kBufferSize
, false);
3189 MessageLoopHelper helper
;
3192 CallbackTest
write_callback(&helper
, false);
3193 EXPECT_EQ(net::ERR_IO_PENDING
,
3198 base::Bind(&CallbackTest::Run
,
3199 base::Unretained(&write_callback
)),
3203 scoped_refptr
<net::IOBuffer
> read_buffer(new net::IOBuffer(kBufferSize
));
3204 CallbackTest
read_callback(&helper
, false);
3205 EXPECT_EQ(net::ERR_IO_PENDING
,
3210 base::Bind(&CallbackTest::Run
,
3211 base::Unretained(&read_callback
))));
3214 EXPECT_TRUE(helper
.WaitUntilCacheIoFinished(expected
));
3215 EXPECT_EQ(kBufferSize
, write_callback
.last_result());
3216 EXPECT_EQ(kBufferSize
, read_callback
.last_result());
3217 EXPECT_EQ(0, memcmp(write_buffer
->data(), read_buffer
->data(), kBufferSize
));
3220 TEST_F(DiskCacheEntryTest
, SimpleCacheOpenCreateRaceWithNoIndex
) {
3221 SetSimpleCacheMode();
3222 DisableSimpleCacheWaitForIndex();
3223 DisableIntegrityCheck();
3226 // Assume the index is not initialized, which is likely, since we are blocking
3227 // the IO thread from executing the index finalization step.
3228 disk_cache::Entry
* entry1
;
3229 net::TestCompletionCallback cb1
;
3230 disk_cache::Entry
* entry2
;
3231 net::TestCompletionCallback cb2
;
3232 int rv1
= cache_
->OpenEntry("key", &entry1
, cb1
.callback());
3233 int rv2
= cache_
->CreateEntry("key", &entry2
, cb2
.callback());
3235 EXPECT_EQ(net::ERR_FAILED
, cb1
.GetResult(rv1
));
3236 ASSERT_EQ(net::OK
, cb2
.GetResult(rv2
));
3240 // Checks that reading two entries simultaneously does not discard a CRC check.
3241 // TODO(pasko): make it work with Simple Cache.
3242 TEST_F(DiskCacheEntryTest
, DISABLED_SimpleCacheMultipleReadersCheckCRC
) {
3243 SetSimpleCacheMode();
3246 const char key
[] = "key";
3249 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key
, &size
));
3251 scoped_refptr
<net::IOBuffer
> read_buffer1(new net::IOBuffer(size
));
3252 scoped_refptr
<net::IOBuffer
> read_buffer2(new net::IOBuffer(size
));
3254 // Advance the first reader a little.
3255 disk_cache::Entry
* entry
= NULL
;
3256 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3257 EXPECT_EQ(1, ReadData(entry
, 0, 0, read_buffer1
.get(), 1));
3259 // Make the second reader pass the point where the first one is, and close.
3260 disk_cache::Entry
* entry2
= NULL
;
3261 EXPECT_EQ(net::OK
, OpenEntry(key
, &entry2
));
3262 EXPECT_EQ(1, ReadData(entry2
, 0, 0, read_buffer2
.get(), 1));
3263 EXPECT_EQ(1, ReadData(entry2
, 0, 1, read_buffer2
.get(), 1));
3266 // Read the data till the end should produce an error.
3267 EXPECT_GT(0, ReadData(entry
, 0, 1, read_buffer1
.get(), size
));
3269 DisableIntegrityCheck();
3272 // Checking one more scenario of overlapped reading of a bad entry.
3273 // Differs from the |SimpleCacheMultipleReadersCheckCRC| only by the order of
3275 TEST_F(DiskCacheEntryTest
, SimpleCacheMultipleReadersCheckCRC2
) {
3276 SetSimpleCacheMode();
3279 const char key
[] = "key";
3281 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key
, &size
));
3283 scoped_refptr
<net::IOBuffer
> read_buffer1(new net::IOBuffer(size
));
3284 scoped_refptr
<net::IOBuffer
> read_buffer2(new net::IOBuffer(size
));
3286 // Advance the first reader a little.
3287 disk_cache::Entry
* entry
= NULL
;
3288 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3289 ScopedEntryPtr
entry_closer(entry
);
3290 EXPECT_EQ(1, ReadData(entry
, 0, 0, read_buffer1
.get(), 1));
3292 // Advance the 2nd reader by the same amount.
3293 disk_cache::Entry
* entry2
= NULL
;
3294 EXPECT_EQ(net::OK
, OpenEntry(key
, &entry2
));
3295 ScopedEntryPtr
entry2_closer(entry2
);
3296 EXPECT_EQ(1, ReadData(entry2
, 0, 0, read_buffer2
.get(), 1));
3298 // Continue reading 1st.
3299 EXPECT_GT(0, ReadData(entry
, 0, 1, read_buffer1
.get(), size
));
3301 // This read should fail as well because we have previous read failures.
3302 EXPECT_GT(0, ReadData(entry2
, 0, 1, read_buffer2
.get(), 1));
3303 DisableIntegrityCheck();
3306 // Test if we can sequentially read each subset of the data until all the data
3307 // is read, then the CRC is calculated correctly and the reads are successful.
3308 TEST_F(DiskCacheEntryTest
, SimpleCacheReadCombineCRC
) {
3310 // Create, Write, Read (first half of data), Read (second half of data),
3312 SetSimpleCacheMode();
3314 disk_cache::Entry
* null
= NULL
;
3315 const char key
[] = "the first key";
3317 const int kHalfSize
= 200;
3318 const int kSize
= 2 * kHalfSize
;
3319 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
3320 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
3321 disk_cache::Entry
* entry
= NULL
;
3323 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3324 EXPECT_NE(null
, entry
);
3326 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer1
.get(), kSize
, false));
3329 disk_cache::Entry
* entry2
= NULL
;
3330 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry2
));
3331 EXPECT_EQ(entry
, entry2
);
3333 // Read the first half of the data.
3335 int buf_len
= kHalfSize
;
3336 scoped_refptr
<net::IOBuffer
> buffer1_read1(new net::IOBuffer(buf_len
));
3337 EXPECT_EQ(buf_len
, ReadData(entry2
, 0, offset
, buffer1_read1
.get(), buf_len
));
3338 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer1_read1
->data(), buf_len
));
3340 // Read the second half of the data.
3342 buf_len
= kHalfSize
;
3343 scoped_refptr
<net::IOBuffer
> buffer1_read2(new net::IOBuffer(buf_len
));
3344 EXPECT_EQ(buf_len
, ReadData(entry2
, 0, offset
, buffer1_read2
.get(), buf_len
));
3345 char* buffer1_data
= buffer1
->data() + offset
;
3346 EXPECT_EQ(0, memcmp(buffer1_data
, buffer1_read2
->data(), buf_len
));
3348 // Check that we are not leaking.
3349 EXPECT_NE(entry
, null
);
3351 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
3356 // Test if we can write the data not in sequence and read correctly. In
3357 // this case the CRC will not be present.
3358 TEST_F(DiskCacheEntryTest
, SimpleCacheNonSequentialWrite
) {
3360 // Create, Write (second half of data), Write (first half of data), Read,
3362 SetSimpleCacheMode();
3364 disk_cache::Entry
* null
= NULL
;
3365 const char key
[] = "the first key";
3367 const int kHalfSize
= 200;
3368 const int kSize
= 2 * kHalfSize
;
3369 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
3370 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
3371 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
3372 char* buffer1_data
= buffer1
->data() + kHalfSize
;
3373 memcpy(buffer2
->data(), buffer1_data
, kHalfSize
);
3374 disk_cache::Entry
* entry
= NULL
;
3376 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3377 EXPECT_NE(null
, entry
);
3379 int offset
= kHalfSize
;
3380 int buf_len
= kHalfSize
;
3383 WriteData(entry
, 0, offset
, buffer2
.get(), buf_len
, false));
3385 buf_len
= kHalfSize
;
3387 WriteData(entry
, 0, offset
, buffer1
.get(), buf_len
, false));
3390 disk_cache::Entry
* entry2
= NULL
;
3391 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry2
));
3392 EXPECT_EQ(entry
, entry2
);
3394 scoped_refptr
<net::IOBuffer
> buffer1_read1(new net::IOBuffer(kSize
));
3395 EXPECT_EQ(kSize
, ReadData(entry2
, 0, 0, buffer1_read1
.get(), kSize
));
3396 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer1_read1
->data(), kSize
));
3398 // Check that we are not leaking.
3399 ASSERT_NE(entry
, null
);
3401 static_cast<disk_cache::SimpleEntryImpl
*>(entry
)->HasOneRef());
3406 #endif // defined(OS_POSIX)