Roll src/third_party/WebKit d9c6159:8139f33 (svn 201974:201975)
[chromium-blink-merge.git] / net / disk_cache / entry_unittest.cc
blobbf154f23d90366df35b7d399fc42d413a8dada73
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/bind.h"
7 #include "base/bind_helpers.h"
8 #include "base/files/file.h"
9 #include "base/files/file_util.h"
10 #include "base/strings/string_number_conversions.h"
11 #include "base/strings/string_util.h"
12 #include "base/threading/platform_thread.h"
13 #include "net/base/completion_callback.h"
14 #include "net/base/io_buffer.h"
15 #include "net/base/net_errors.h"
16 #include "net/base/test_completion_callback.h"
17 #include "net/disk_cache/blockfile/backend_impl.h"
18 #include "net/disk_cache/blockfile/entry_impl.h"
19 #include "net/disk_cache/disk_cache_test_base.h"
20 #include "net/disk_cache/disk_cache_test_util.h"
21 #include "net/disk_cache/memory/mem_entry_impl.h"
22 #include "net/disk_cache/simple/simple_entry_format.h"
23 #include "net/disk_cache/simple/simple_entry_impl.h"
24 #include "net/disk_cache/simple/simple_synchronous_entry.h"
25 #include "net/disk_cache/simple/simple_test_util.h"
26 #include "net/disk_cache/simple/simple_util.h"
27 #include "testing/gtest/include/gtest/gtest.h"
29 using base::Time;
30 using disk_cache::ScopedEntryPtr;
32 // Tests that can run with different types of caches.
33 class DiskCacheEntryTest : public DiskCacheTestWithCache {
34 public:
35 void InternalSyncIOBackground(disk_cache::Entry* entry);
36 void ExternalSyncIOBackground(disk_cache::Entry* entry);
38 protected:
39 void InternalSyncIO();
40 void InternalAsyncIO();
41 void ExternalSyncIO();
42 void ExternalAsyncIO();
43 void ReleaseBuffer(int stream_index);
44 void StreamAccess();
45 void GetKey();
46 void GetTimes(int stream_index);
47 void GrowData(int stream_index);
48 void TruncateData(int stream_index);
49 void ZeroLengthIO(int stream_index);
50 void Buffering();
51 void SizeAtCreate();
52 void SizeChanges(int stream_index);
53 void ReuseEntry(int size, int stream_index);
54 void InvalidData(int stream_index);
55 void ReadWriteDestroyBuffer(int stream_index);
56 void DoomNormalEntry();
57 void DoomEntryNextToOpenEntry();
58 void DoomedEntry(int stream_index);
59 void BasicSparseIO();
60 void HugeSparseIO();
61 void GetAvailableRange();
62 void CouldBeSparse();
63 void UpdateSparseEntry();
64 void DoomSparseEntry();
65 void PartialSparseEntry();
66 bool SimpleCacheMakeBadChecksumEntry(const std::string& key, int* data_size);
67 bool SimpleCacheThirdStreamFileExists(const char* key);
68 void SyncDoomEntry(const char* key);
71 // This part of the test runs on the background thread.
72 void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) {
73 const int kSize1 = 10;
74 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
75 CacheTestFillBuffer(buffer1->data(), kSize1, false);
76 EXPECT_EQ(
78 entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
79 base::strlcpy(buffer1->data(), "the data", kSize1);
80 EXPECT_EQ(10,
81 entry->WriteData(
82 0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
83 memset(buffer1->data(), 0, kSize1);
84 EXPECT_EQ(
85 10,
86 entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
87 EXPECT_STREQ("the data", buffer1->data());
89 const int kSize2 = 5000;
90 const int kSize3 = 10000;
91 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
92 scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
93 memset(buffer3->data(), 0, kSize3);
94 CacheTestFillBuffer(buffer2->data(), kSize2, false);
95 base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
96 EXPECT_EQ(
97 5000,
98 entry->WriteData(
99 1, 1500, buffer2.get(), kSize2, net::CompletionCallback(), false));
100 memset(buffer2->data(), 0, kSize2);
101 EXPECT_EQ(4989,
102 entry->ReadData(
103 1, 1511, buffer2.get(), kSize2, net::CompletionCallback()));
104 EXPECT_STREQ("big data goes here", buffer2->data());
105 EXPECT_EQ(
106 5000,
107 entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
108 EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
109 EXPECT_EQ(1500,
110 entry->ReadData(
111 1, 5000, buffer2.get(), kSize2, net::CompletionCallback()));
113 EXPECT_EQ(0,
114 entry->ReadData(
115 1, 6500, buffer2.get(), kSize2, net::CompletionCallback()));
116 EXPECT_EQ(
117 6500,
118 entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
119 EXPECT_EQ(8192,
120 entry->WriteData(
121 1, 0, buffer3.get(), 8192, net::CompletionCallback(), false));
122 EXPECT_EQ(
123 8192,
124 entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
125 EXPECT_EQ(8192, entry->GetDataSize(1));
127 // We need to delete the memory buffer on this thread.
128 EXPECT_EQ(0, entry->WriteData(
129 0, 0, NULL, 0, net::CompletionCallback(), true));
130 EXPECT_EQ(0, entry->WriteData(
131 1, 0, NULL, 0, net::CompletionCallback(), true));
134 // We need to support synchronous IO even though it is not a supported operation
135 // from the point of view of the disk cache's public interface, because we use
136 // it internally, not just by a few tests, but as part of the implementation
137 // (see sparse_control.cc, for example).
138 void DiskCacheEntryTest::InternalSyncIO() {
139 disk_cache::Entry* entry = NULL;
140 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
141 ASSERT_TRUE(NULL != entry);
143 // The bulk of the test runs from within the callback, on the cache thread.
144 RunTaskForTest(base::Bind(&DiskCacheEntryTest::InternalSyncIOBackground,
145 base::Unretained(this),
146 entry));
149 entry->Doom();
150 entry->Close();
151 FlushQueueForTest();
152 EXPECT_EQ(0, cache_->GetEntryCount());
155 TEST_F(DiskCacheEntryTest, InternalSyncIO) {
156 InitCache();
157 InternalSyncIO();
160 TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
161 SetMemoryOnlyMode();
162 InitCache();
163 InternalSyncIO();
166 void DiskCacheEntryTest::InternalAsyncIO() {
167 disk_cache::Entry* entry = NULL;
168 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
169 ASSERT_TRUE(NULL != entry);
171 // Avoid using internal buffers for the test. We have to write something to
172 // the entry and close it so that we flush the internal buffer to disk. After
173 // that, IO operations will be really hitting the disk. We don't care about
174 // the content, so just extending the entry is enough (all extensions zero-
175 // fill any holes).
176 EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, NULL, 0, false));
177 EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, NULL, 0, false));
178 entry->Close();
179 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
181 MessageLoopHelper helper;
182 // Let's verify that each IO goes to the right callback object.
183 CallbackTest callback1(&helper, false);
184 CallbackTest callback2(&helper, false);
185 CallbackTest callback3(&helper, false);
186 CallbackTest callback4(&helper, false);
187 CallbackTest callback5(&helper, false);
188 CallbackTest callback6(&helper, false);
189 CallbackTest callback7(&helper, false);
190 CallbackTest callback8(&helper, false);
191 CallbackTest callback9(&helper, false);
192 CallbackTest callback10(&helper, false);
193 CallbackTest callback11(&helper, false);
194 CallbackTest callback12(&helper, false);
195 CallbackTest callback13(&helper, false);
197 const int kSize1 = 10;
198 const int kSize2 = 5000;
199 const int kSize3 = 10000;
200 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
201 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
202 scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
203 CacheTestFillBuffer(buffer1->data(), kSize1, false);
204 CacheTestFillBuffer(buffer2->data(), kSize2, false);
205 CacheTestFillBuffer(buffer3->data(), kSize3, false);
207 EXPECT_EQ(0,
208 entry->ReadData(
210 15 * 1024,
211 buffer1.get(),
212 kSize1,
213 base::Bind(&CallbackTest::Run, base::Unretained(&callback1))));
214 base::strlcpy(buffer1->data(), "the data", kSize1);
215 int expected = 0;
216 int ret = entry->WriteData(
219 buffer1.get(),
220 kSize1,
221 base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
222 false);
223 EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
224 if (net::ERR_IO_PENDING == ret)
225 expected++;
227 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
228 memset(buffer2->data(), 0, kSize2);
229 ret = entry->ReadData(
232 buffer2.get(),
233 kSize1,
234 base::Bind(&CallbackTest::Run, base::Unretained(&callback3)));
235 EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
236 if (net::ERR_IO_PENDING == ret)
237 expected++;
239 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
240 EXPECT_STREQ("the data", buffer2->data());
242 base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
243 ret = entry->WriteData(
245 1500,
246 buffer2.get(),
247 kSize2,
248 base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
249 true);
250 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
251 if (net::ERR_IO_PENDING == ret)
252 expected++;
254 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
255 memset(buffer3->data(), 0, kSize3);
256 ret = entry->ReadData(
258 1511,
259 buffer3.get(),
260 kSize2,
261 base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
262 EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
263 if (net::ERR_IO_PENDING == ret)
264 expected++;
266 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
267 EXPECT_STREQ("big data goes here", buffer3->data());
268 ret = entry->ReadData(
271 buffer2.get(),
272 kSize2,
273 base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
274 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
275 if (net::ERR_IO_PENDING == ret)
276 expected++;
278 memset(buffer3->data(), 0, kSize3);
280 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
281 EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
282 ret = entry->ReadData(
284 5000,
285 buffer2.get(),
286 kSize2,
287 base::Bind(&CallbackTest::Run, base::Unretained(&callback7)));
288 EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
289 if (net::ERR_IO_PENDING == ret)
290 expected++;
292 ret = entry->ReadData(
295 buffer3.get(),
296 kSize3,
297 base::Bind(&CallbackTest::Run, base::Unretained(&callback9)));
298 EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
299 if (net::ERR_IO_PENDING == ret)
300 expected++;
302 ret = entry->WriteData(
305 buffer3.get(),
306 8192,
307 base::Bind(&CallbackTest::Run, base::Unretained(&callback10)),
308 true);
309 EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
310 if (net::ERR_IO_PENDING == ret)
311 expected++;
313 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
314 ret = entry->ReadData(
317 buffer3.get(),
318 kSize3,
319 base::Bind(&CallbackTest::Run, base::Unretained(&callback11)));
320 EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
321 if (net::ERR_IO_PENDING == ret)
322 expected++;
324 EXPECT_EQ(8192, entry->GetDataSize(1));
326 ret = entry->ReadData(
329 buffer1.get(),
330 kSize1,
331 base::Bind(&CallbackTest::Run, base::Unretained(&callback12)));
332 EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
333 if (net::ERR_IO_PENDING == ret)
334 expected++;
336 ret = entry->ReadData(
339 buffer2.get(),
340 kSize2,
341 base::Bind(&CallbackTest::Run, base::Unretained(&callback13)));
342 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
343 if (net::ERR_IO_PENDING == ret)
344 expected++;
346 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
348 EXPECT_FALSE(helper.callback_reused_error());
350 entry->Doom();
351 entry->Close();
352 FlushQueueForTest();
353 EXPECT_EQ(0, cache_->GetEntryCount());
356 TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
357 InitCache();
358 InternalAsyncIO();
361 TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
362 SetMemoryOnlyMode();
363 InitCache();
364 InternalAsyncIO();
367 // This part of the test runs on the background thread.
368 void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) {
369 const int kSize1 = 17000;
370 const int kSize2 = 25000;
371 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
372 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
373 CacheTestFillBuffer(buffer1->data(), kSize1, false);
374 CacheTestFillBuffer(buffer2->data(), kSize2, false);
375 base::strlcpy(buffer1->data(), "the data", kSize1);
376 EXPECT_EQ(17000,
377 entry->WriteData(
378 0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
379 memset(buffer1->data(), 0, kSize1);
380 EXPECT_EQ(
381 17000,
382 entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
383 EXPECT_STREQ("the data", buffer1->data());
385 base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
386 EXPECT_EQ(
387 25000,
388 entry->WriteData(
389 1, 10000, buffer2.get(), kSize2, net::CompletionCallback(), false));
390 memset(buffer2->data(), 0, kSize2);
391 EXPECT_EQ(24989,
392 entry->ReadData(
393 1, 10011, buffer2.get(), kSize2, net::CompletionCallback()));
394 EXPECT_STREQ("big data goes here", buffer2->data());
395 EXPECT_EQ(
396 25000,
397 entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
398 EXPECT_EQ(5000,
399 entry->ReadData(
400 1, 30000, buffer2.get(), kSize2, net::CompletionCallback()));
402 EXPECT_EQ(0,
403 entry->ReadData(
404 1, 35000, buffer2.get(), kSize2, net::CompletionCallback()));
405 EXPECT_EQ(
406 17000,
407 entry->ReadData(1, 0, buffer1.get(), kSize1, net::CompletionCallback()));
408 EXPECT_EQ(
409 17000,
410 entry->WriteData(
411 1, 20000, buffer1.get(), kSize1, net::CompletionCallback(), false));
412 EXPECT_EQ(37000, entry->GetDataSize(1));
414 // We need to delete the memory buffer on this thread.
415 EXPECT_EQ(0, entry->WriteData(
416 0, 0, NULL, 0, net::CompletionCallback(), true));
417 EXPECT_EQ(0, entry->WriteData(
418 1, 0, NULL, 0, net::CompletionCallback(), true));
421 void DiskCacheEntryTest::ExternalSyncIO() {
422 disk_cache::Entry* entry;
423 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
425 // The bulk of the test runs from within the callback, on the cache thread.
426 RunTaskForTest(base::Bind(&DiskCacheEntryTest::ExternalSyncIOBackground,
427 base::Unretained(this),
428 entry));
430 entry->Doom();
431 entry->Close();
432 FlushQueueForTest();
433 EXPECT_EQ(0, cache_->GetEntryCount());
436 TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
437 InitCache();
438 ExternalSyncIO();
441 TEST_F(DiskCacheEntryTest, ExternalSyncIONoBuffer) {
442 InitCache();
443 cache_impl_->SetFlags(disk_cache::kNoBuffering);
444 ExternalSyncIO();
447 TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
448 SetMemoryOnlyMode();
449 InitCache();
450 ExternalSyncIO();
453 void DiskCacheEntryTest::ExternalAsyncIO() {
454 disk_cache::Entry* entry;
455 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
457 int expected = 0;
459 MessageLoopHelper helper;
460 // Let's verify that each IO goes to the right callback object.
461 CallbackTest callback1(&helper, false);
462 CallbackTest callback2(&helper, false);
463 CallbackTest callback3(&helper, false);
464 CallbackTest callback4(&helper, false);
465 CallbackTest callback5(&helper, false);
466 CallbackTest callback6(&helper, false);
467 CallbackTest callback7(&helper, false);
468 CallbackTest callback8(&helper, false);
469 CallbackTest callback9(&helper, false);
471 const int kSize1 = 17000;
472 const int kSize2 = 25000;
473 const int kSize3 = 25000;
474 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
475 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
476 scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
477 CacheTestFillBuffer(buffer1->data(), kSize1, false);
478 CacheTestFillBuffer(buffer2->data(), kSize2, false);
479 CacheTestFillBuffer(buffer3->data(), kSize3, false);
480 base::strlcpy(buffer1->data(), "the data", kSize1);
481 int ret = entry->WriteData(
484 buffer1.get(),
485 kSize1,
486 base::Bind(&CallbackTest::Run, base::Unretained(&callback1)),
487 false);
488 EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
489 if (net::ERR_IO_PENDING == ret)
490 expected++;
492 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
494 memset(buffer2->data(), 0, kSize1);
495 ret = entry->ReadData(
498 buffer2.get(),
499 kSize1,
500 base::Bind(&CallbackTest::Run, base::Unretained(&callback2)));
501 EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
502 if (net::ERR_IO_PENDING == ret)
503 expected++;
505 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
506 EXPECT_STREQ("the data", buffer2->data());
508 base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
509 ret = entry->WriteData(
511 10000,
512 buffer2.get(),
513 kSize2,
514 base::Bind(&CallbackTest::Run, base::Unretained(&callback3)),
515 false);
516 EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
517 if (net::ERR_IO_PENDING == ret)
518 expected++;
520 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
522 memset(buffer3->data(), 0, kSize3);
523 ret = entry->ReadData(
525 10011,
526 buffer3.get(),
527 kSize3,
528 base::Bind(&CallbackTest::Run, base::Unretained(&callback4)));
529 EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
530 if (net::ERR_IO_PENDING == ret)
531 expected++;
533 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
534 EXPECT_STREQ("big data goes here", buffer3->data());
535 ret = entry->ReadData(
538 buffer2.get(),
539 kSize2,
540 base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
541 EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
542 if (net::ERR_IO_PENDING == ret)
543 expected++;
545 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
546 memset(buffer3->data(), 0, kSize3);
547 EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));
548 ret = entry->ReadData(
550 30000,
551 buffer2.get(),
552 kSize2,
553 base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
554 EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
555 if (net::ERR_IO_PENDING == ret)
556 expected++;
558 EXPECT_EQ(0,
559 entry->ReadData(
561 35000,
562 buffer2.get(),
563 kSize2,
564 base::Bind(&CallbackTest::Run, base::Unretained(&callback7))));
565 ret = entry->ReadData(
568 buffer1.get(),
569 kSize1,
570 base::Bind(&CallbackTest::Run, base::Unretained(&callback8)));
571 EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
572 if (net::ERR_IO_PENDING == ret)
573 expected++;
574 ret = entry->WriteData(
576 20000,
577 buffer3.get(),
578 kSize1,
579 base::Bind(&CallbackTest::Run, base::Unretained(&callback9)),
580 false);
581 EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
582 if (net::ERR_IO_PENDING == ret)
583 expected++;
585 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
586 EXPECT_EQ(37000, entry->GetDataSize(1));
588 EXPECT_FALSE(helper.callback_reused_error());
590 entry->Doom();
591 entry->Close();
592 FlushQueueForTest();
593 EXPECT_EQ(0, cache_->GetEntryCount());
596 TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
597 InitCache();
598 ExternalAsyncIO();
601 // TODO(ios): This test is flaky. http://crbug.com/497101
602 #if defined(OS_IOS)
603 #define MAYBE_ExternalAsyncIONoBuffer DISABLED_ExternalAsyncIONoBuffer
604 #else
605 #define MAYBE_ExternalAsyncIONoBuffer ExternalAsyncIONoBuffer
606 #endif
607 TEST_F(DiskCacheEntryTest, MAYBE_ExternalAsyncIONoBuffer) {
608 InitCache();
609 cache_impl_->SetFlags(disk_cache::kNoBuffering);
610 ExternalAsyncIO();
613 TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
614 SetMemoryOnlyMode();
615 InitCache();
616 ExternalAsyncIO();
619 // Tests that IOBuffers are not referenced after IO completes.
620 void DiskCacheEntryTest::ReleaseBuffer(int stream_index) {
621 disk_cache::Entry* entry = NULL;
622 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
623 ASSERT_TRUE(NULL != entry);
625 const int kBufferSize = 1024;
626 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
627 CacheTestFillBuffer(buffer->data(), kBufferSize, false);
629 net::ReleaseBufferCompletionCallback cb(buffer.get());
630 int rv = entry->WriteData(
631 stream_index, 0, buffer.get(), kBufferSize, cb.callback(), false);
632 EXPECT_EQ(kBufferSize, cb.GetResult(rv));
633 entry->Close();
636 TEST_F(DiskCacheEntryTest, ReleaseBuffer) {
637 InitCache();
638 cache_impl_->SetFlags(disk_cache::kNoBuffering);
639 ReleaseBuffer(0);
642 TEST_F(DiskCacheEntryTest, MemoryOnlyReleaseBuffer) {
643 SetMemoryOnlyMode();
644 InitCache();
645 ReleaseBuffer(0);
648 void DiskCacheEntryTest::StreamAccess() {
649 disk_cache::Entry* entry = NULL;
650 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
651 ASSERT_TRUE(NULL != entry);
653 const int kBufferSize = 1024;
654 const int kNumStreams = 3;
655 scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
656 for (int i = 0; i < kNumStreams; i++) {
657 reference_buffers[i] = new net::IOBuffer(kBufferSize);
658 CacheTestFillBuffer(reference_buffers[i]->data(), kBufferSize, false);
660 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kBufferSize));
661 for (int i = 0; i < kNumStreams; i++) {
662 EXPECT_EQ(
663 kBufferSize,
664 WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false));
665 memset(buffer1->data(), 0, kBufferSize);
666 EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
667 EXPECT_EQ(
668 0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
670 EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
671 ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
672 entry->Close();
674 // Open the entry and read it in chunks, including a read past the end.
675 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
676 ASSERT_TRUE(NULL != entry);
677 const int kReadBufferSize = 600;
678 const int kFinalReadSize = kBufferSize - kReadBufferSize;
679 static_assert(kFinalReadSize < kReadBufferSize,
680 "should be exactly two reads");
681 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize));
682 for (int i = 0; i < kNumStreams; i++) {
683 memset(buffer2->data(), 0, kReadBufferSize);
684 EXPECT_EQ(kReadBufferSize,
685 ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
686 EXPECT_EQ(
688 memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));
690 memset(buffer2->data(), 0, kReadBufferSize);
691 EXPECT_EQ(
692 kFinalReadSize,
693 ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize));
694 EXPECT_EQ(0,
695 memcmp(reference_buffers[i]->data() + kReadBufferSize,
696 buffer2->data(),
697 kFinalReadSize));
700 entry->Close();
703 TEST_F(DiskCacheEntryTest, StreamAccess) {
704 InitCache();
705 StreamAccess();
708 TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
709 SetMemoryOnlyMode();
710 InitCache();
711 StreamAccess();
714 void DiskCacheEntryTest::GetKey() {
715 std::string key("the first key");
716 disk_cache::Entry* entry;
717 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
718 EXPECT_EQ(key, entry->GetKey()) << "short key";
719 entry->Close();
721 int seed = static_cast<int>(Time::Now().ToInternalValue());
722 srand(seed);
723 char key_buffer[20000];
725 CacheTestFillBuffer(key_buffer, 3000, true);
726 key_buffer[1000] = '\0';
728 key = key_buffer;
729 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
730 EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
731 entry->Close();
733 key_buffer[1000] = 'p';
734 key_buffer[3000] = '\0';
735 key = key_buffer;
736 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
737 EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
738 entry->Close();
740 CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
741 key_buffer[19999] = '\0';
743 key = key_buffer;
744 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
745 EXPECT_TRUE(key == entry->GetKey()) << "long key";
746 entry->Close();
748 CacheTestFillBuffer(key_buffer, 0x4000, true);
749 key_buffer[0x4000] = '\0';
751 key = key_buffer;
752 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
753 EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
754 entry->Close();
757 TEST_F(DiskCacheEntryTest, GetKey) {
758 InitCache();
759 GetKey();
762 TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
763 SetMemoryOnlyMode();
764 InitCache();
765 GetKey();
768 void DiskCacheEntryTest::GetTimes(int stream_index) {
769 std::string key("the first key");
770 disk_cache::Entry* entry;
772 Time t1 = Time::Now();
773 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
774 EXPECT_TRUE(entry->GetLastModified() >= t1);
775 EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());
777 AddDelay();
778 Time t2 = Time::Now();
779 EXPECT_TRUE(t2 > t1);
780 EXPECT_EQ(0, WriteData(entry, stream_index, 200, NULL, 0, false));
781 if (type_ == net::APP_CACHE) {
782 EXPECT_TRUE(entry->GetLastModified() < t2);
783 } else {
784 EXPECT_TRUE(entry->GetLastModified() >= t2);
786 EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());
788 AddDelay();
789 Time t3 = Time::Now();
790 EXPECT_TRUE(t3 > t2);
791 const int kSize = 200;
792 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
793 EXPECT_EQ(kSize, ReadData(entry, stream_index, 0, buffer.get(), kSize));
794 if (type_ == net::APP_CACHE) {
795 EXPECT_TRUE(entry->GetLastUsed() < t2);
796 EXPECT_TRUE(entry->GetLastModified() < t2);
797 } else if (type_ == net::SHADER_CACHE) {
798 EXPECT_TRUE(entry->GetLastUsed() < t3);
799 EXPECT_TRUE(entry->GetLastModified() < t3);
800 } else {
801 EXPECT_TRUE(entry->GetLastUsed() >= t3);
802 EXPECT_TRUE(entry->GetLastModified() < t3);
804 entry->Close();
807 TEST_F(DiskCacheEntryTest, GetTimes) {
808 InitCache();
809 GetTimes(0);
812 TEST_F(DiskCacheEntryTest, MemoryOnlyGetTimes) {
813 SetMemoryOnlyMode();
814 InitCache();
815 GetTimes(0);
818 TEST_F(DiskCacheEntryTest, AppCacheGetTimes) {
819 SetCacheType(net::APP_CACHE);
820 InitCache();
821 GetTimes(0);
824 TEST_F(DiskCacheEntryTest, ShaderCacheGetTimes) {
825 SetCacheType(net::SHADER_CACHE);
826 InitCache();
827 GetTimes(0);
830 void DiskCacheEntryTest::GrowData(int stream_index) {
831 std::string key1("the first key");
832 disk_cache::Entry* entry;
833 ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
835 const int kSize = 20000;
836 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
837 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
838 CacheTestFillBuffer(buffer1->data(), kSize, false);
839 memset(buffer2->data(), 0, kSize);
841 base::strlcpy(buffer1->data(), "the data", kSize);
842 EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
843 EXPECT_EQ(10, ReadData(entry, stream_index, 0, buffer2.get(), 10));
844 EXPECT_STREQ("the data", buffer2->data());
845 EXPECT_EQ(10, entry->GetDataSize(stream_index));
847 EXPECT_EQ(2000,
848 WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
849 EXPECT_EQ(2000, entry->GetDataSize(stream_index));
850 EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
851 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
853 EXPECT_EQ(20000,
854 WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
855 EXPECT_EQ(20000, entry->GetDataSize(stream_index));
856 EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
857 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
858 entry->Close();
860 memset(buffer2->data(), 0, kSize);
861 std::string key2("Second key");
862 ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
863 EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
864 EXPECT_EQ(10, entry->GetDataSize(stream_index));
865 entry->Close();
867 // Go from an internal address to a bigger block size.
868 ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
869 EXPECT_EQ(2000,
870 WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
871 EXPECT_EQ(2000, entry->GetDataSize(stream_index));
872 EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
873 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
874 entry->Close();
875 memset(buffer2->data(), 0, kSize);
877 // Go from an internal address to an external one.
878 ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
879 EXPECT_EQ(20000,
880 WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
881 EXPECT_EQ(20000, entry->GetDataSize(stream_index));
882 EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
883 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
884 entry->Close();
886 // Double check the size from disk.
887 ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
888 EXPECT_EQ(20000, entry->GetDataSize(stream_index));
890 // Now extend the entry without actual data.
891 EXPECT_EQ(0, WriteData(entry, stream_index, 45500, buffer1.get(), 0, false));
892 entry->Close();
894 // And check again from disk.
895 ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
896 EXPECT_EQ(45500, entry->GetDataSize(stream_index));
897 entry->Close();
900 TEST_F(DiskCacheEntryTest, GrowData) {
901 InitCache();
902 GrowData(0);
905 TEST_F(DiskCacheEntryTest, GrowDataNoBuffer) {
906 InitCache();
907 cache_impl_->SetFlags(disk_cache::kNoBuffering);
908 GrowData(0);
911 TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
912 SetMemoryOnlyMode();
913 InitCache();
914 GrowData(0);
917 void DiskCacheEntryTest::TruncateData(int stream_index) {
918 std::string key("the first key");
919 disk_cache::Entry* entry;
920 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
922 const int kSize1 = 20000;
923 const int kSize2 = 20000;
924 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
925 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
927 CacheTestFillBuffer(buffer1->data(), kSize1, false);
928 memset(buffer2->data(), 0, kSize2);
930 // Simple truncation:
931 EXPECT_EQ(200, WriteData(entry, stream_index, 0, buffer1.get(), 200, false));
932 EXPECT_EQ(200, entry->GetDataSize(stream_index));
933 EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, false));
934 EXPECT_EQ(200, entry->GetDataSize(stream_index));
935 EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, true));
936 EXPECT_EQ(100, entry->GetDataSize(stream_index));
937 EXPECT_EQ(0, WriteData(entry, stream_index, 50, buffer1.get(), 0, true));
938 EXPECT_EQ(50, entry->GetDataSize(stream_index));
939 EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
940 EXPECT_EQ(0, entry->GetDataSize(stream_index));
941 entry->Close();
942 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
944 // Go to an external file.
945 EXPECT_EQ(20000,
946 WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
947 EXPECT_EQ(20000, entry->GetDataSize(stream_index));
948 EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), 20000));
949 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
950 memset(buffer2->data(), 0, kSize2);
952 // External file truncation
953 EXPECT_EQ(18000,
954 WriteData(entry, stream_index, 0, buffer1.get(), 18000, false));
955 EXPECT_EQ(20000, entry->GetDataSize(stream_index));
956 EXPECT_EQ(18000,
957 WriteData(entry, stream_index, 0, buffer1.get(), 18000, true));
958 EXPECT_EQ(18000, entry->GetDataSize(stream_index));
959 EXPECT_EQ(0, WriteData(entry, stream_index, 17500, buffer1.get(), 0, true));
960 EXPECT_EQ(17500, entry->GetDataSize(stream_index));
962 // And back to an internal block.
963 EXPECT_EQ(600,
964 WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
965 EXPECT_EQ(1600, entry->GetDataSize(stream_index));
966 EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer2.get(), 600));
967 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
968 EXPECT_EQ(1000, ReadData(entry, stream_index, 0, buffer2.get(), 1000));
969 EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
970 << "Preserves previous data";
972 // Go from external file to zero length.
973 EXPECT_EQ(20000,
974 WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
975 EXPECT_EQ(20000, entry->GetDataSize(stream_index));
976 EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
977 EXPECT_EQ(0, entry->GetDataSize(stream_index));
979 entry->Close();
982 TEST_F(DiskCacheEntryTest, TruncateData) {
983 InitCache();
984 TruncateData(0);
987 TEST_F(DiskCacheEntryTest, TruncateDataNoBuffer) {
988 InitCache();
989 cache_impl_->SetFlags(disk_cache::kNoBuffering);
990 TruncateData(0);
993 TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
994 SetMemoryOnlyMode();
995 InitCache();
996 TruncateData(0);
999 void DiskCacheEntryTest::ZeroLengthIO(int stream_index) {
1000 std::string key("the first key");
1001 disk_cache::Entry* entry;
1002 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1004 EXPECT_EQ(0, ReadData(entry, stream_index, 0, NULL, 0));
1005 EXPECT_EQ(0, WriteData(entry, stream_index, 0, NULL, 0, false));
1007 // This write should extend the entry.
1008 EXPECT_EQ(0, WriteData(entry, stream_index, 1000, NULL, 0, false));
1009 EXPECT_EQ(0, ReadData(entry, stream_index, 500, NULL, 0));
1010 EXPECT_EQ(0, ReadData(entry, stream_index, 2000, NULL, 0));
1011 EXPECT_EQ(1000, entry->GetDataSize(stream_index));
1013 EXPECT_EQ(0, WriteData(entry, stream_index, 100000, NULL, 0, true));
1014 EXPECT_EQ(0, ReadData(entry, stream_index, 50000, NULL, 0));
1015 EXPECT_EQ(100000, entry->GetDataSize(stream_index));
1017 // Let's verify the actual content.
1018 const int kSize = 20;
1019 const char zeros[kSize] = {};
1020 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1022 CacheTestFillBuffer(buffer->data(), kSize, false);
1023 EXPECT_EQ(kSize, ReadData(entry, stream_index, 500, buffer.get(), kSize));
1024 EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
1026 CacheTestFillBuffer(buffer->data(), kSize, false);
1027 EXPECT_EQ(kSize, ReadData(entry, stream_index, 5000, buffer.get(), kSize));
1028 EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
1030 CacheTestFillBuffer(buffer->data(), kSize, false);
1031 EXPECT_EQ(kSize, ReadData(entry, stream_index, 50000, buffer.get(), kSize));
1032 EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));
1034 entry->Close();
1037 TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
1038 InitCache();
1039 ZeroLengthIO(0);
1042 TEST_F(DiskCacheEntryTest, ZeroLengthIONoBuffer) {
1043 InitCache();
1044 cache_impl_->SetFlags(disk_cache::kNoBuffering);
1045 ZeroLengthIO(0);
1048 TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
1049 SetMemoryOnlyMode();
1050 InitCache();
1051 ZeroLengthIO(0);
1054 // Tests that we handle the content correctly when buffering, a feature of the
1055 // standard cache that permits fast responses to certain reads.
1056 void DiskCacheEntryTest::Buffering() {
1057 std::string key("the first key");
1058 disk_cache::Entry* entry;
1059 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1061 const int kSize = 200;
1062 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1063 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
1064 CacheTestFillBuffer(buffer1->data(), kSize, true);
1065 CacheTestFillBuffer(buffer2->data(), kSize, true);
1067 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
1068 entry->Close();
1070 // Write a little more and read what we wrote before.
1071 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1072 EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
1073 EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
1074 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1076 // Now go to an external file.
1077 EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
1078 entry->Close();
1080 // Write something else and verify old data.
1081 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1082 EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
1083 CacheTestFillBuffer(buffer2->data(), kSize, true);
1084 EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
1085 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1086 CacheTestFillBuffer(buffer2->data(), kSize, true);
1087 EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
1088 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1089 CacheTestFillBuffer(buffer2->data(), kSize, true);
1090 EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
1091 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1093 // Extend the file some more.
1094 EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
1095 entry->Close();
1097 // And now make sure that we can deal with data in both places (ram/disk).
1098 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1099 EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));
1101 // We should not overwrite the data at 18000 with this.
1102 EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
1103 CacheTestFillBuffer(buffer2->data(), kSize, true);
1104 EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
1105 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1106 CacheTestFillBuffer(buffer2->data(), kSize, true);
1107 EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
1108 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1110 EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
1111 CacheTestFillBuffer(buffer2->data(), kSize, true);
1112 EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
1113 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));
1115 CacheTestFillBuffer(buffer2->data(), kSize, true);
1116 EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
1117 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));
1119 // Extend the file again and read before without closing the entry.
1120 EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
1121 EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
1122 CacheTestFillBuffer(buffer2->data(), kSize, true);
1123 EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
1124 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1125 CacheTestFillBuffer(buffer2->data(), kSize, true);
1126 EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
1127 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
1129 entry->Close();
1132 TEST_F(DiskCacheEntryTest, Buffering) {
1133 InitCache();
1134 Buffering();
1137 TEST_F(DiskCacheEntryTest, BufferingNoBuffer) {
1138 InitCache();
1139 cache_impl_->SetFlags(disk_cache::kNoBuffering);
1140 Buffering();
1143 // Checks that entries are zero length when created.
1144 void DiskCacheEntryTest::SizeAtCreate() {
1145 const char key[] = "the first key";
1146 disk_cache::Entry* entry;
1147 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1149 const int kNumStreams = 3;
1150 for (int i = 0; i < kNumStreams; ++i)
1151 EXPECT_EQ(0, entry->GetDataSize(i));
1152 entry->Close();
1155 TEST_F(DiskCacheEntryTest, SizeAtCreate) {
1156 InitCache();
1157 SizeAtCreate();
1160 TEST_F(DiskCacheEntryTest, MemoryOnlySizeAtCreate) {
1161 SetMemoryOnlyMode();
1162 InitCache();
1163 SizeAtCreate();
1166 // Some extra tests to make sure that buffering works properly when changing
1167 // the entry size.
1168 void DiskCacheEntryTest::SizeChanges(int stream_index) {
1169 std::string key("the first key");
1170 disk_cache::Entry* entry;
1171 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1173 const int kSize = 200;
1174 const char zeros[kSize] = {};
1175 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1176 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
1177 CacheTestFillBuffer(buffer1->data(), kSize, true);
1178 CacheTestFillBuffer(buffer2->data(), kSize, true);
1180 EXPECT_EQ(kSize,
1181 WriteData(entry, stream_index, 0, buffer1.get(), kSize, true));
1182 EXPECT_EQ(kSize,
1183 WriteData(entry, stream_index, 17000, buffer1.get(), kSize, true));
1184 EXPECT_EQ(kSize,
1185 WriteData(entry, stream_index, 23000, buffer1.get(), kSize, true));
1186 entry->Close();
1188 // Extend the file and read between the old size and the new write.
1189 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1190 EXPECT_EQ(23000 + kSize, entry->GetDataSize(stream_index));
1191 EXPECT_EQ(kSize,
1192 WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
1193 EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
1194 EXPECT_EQ(kSize, ReadData(entry, stream_index, 24000, buffer2.get(), kSize));
1195 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));
1197 // Read at the end of the old file size.
1198 EXPECT_EQ(
1199 kSize,
1200 ReadData(entry, stream_index, 23000 + kSize - 35, buffer2.get(), kSize));
1201 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));
1203 // Read slightly before the last write.
1204 CacheTestFillBuffer(buffer2->data(), kSize, true);
1205 EXPECT_EQ(kSize, ReadData(entry, stream_index, 24900, buffer2.get(), kSize));
1206 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1207 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1209 // Extend the entry a little more.
1210 EXPECT_EQ(kSize,
1211 WriteData(entry, stream_index, 26000, buffer1.get(), kSize, true));
1212 EXPECT_EQ(26000 + kSize, entry->GetDataSize(stream_index));
1213 CacheTestFillBuffer(buffer2->data(), kSize, true);
1214 EXPECT_EQ(kSize, ReadData(entry, stream_index, 25900, buffer2.get(), kSize));
1215 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1216 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1218 // And now reduce the size.
1219 EXPECT_EQ(kSize,
1220 WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
1221 EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
1222 EXPECT_EQ(
1224 ReadData(entry, stream_index, 25000 + kSize - 28, buffer2.get(), kSize));
1225 EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));
1227 // Reduce the size with a buffer that is not extending the size.
1228 EXPECT_EQ(kSize,
1229 WriteData(entry, stream_index, 24000, buffer1.get(), kSize, false));
1230 EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
1231 EXPECT_EQ(kSize,
1232 WriteData(entry, stream_index, 24500, buffer1.get(), kSize, true));
1233 EXPECT_EQ(24500 + kSize, entry->GetDataSize(stream_index));
1234 EXPECT_EQ(kSize, ReadData(entry, stream_index, 23900, buffer2.get(), kSize));
1235 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1236 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1238 // And now reduce the size below the old size.
1239 EXPECT_EQ(kSize,
1240 WriteData(entry, stream_index, 19000, buffer1.get(), kSize, true));
1241 EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));
1242 EXPECT_EQ(kSize, ReadData(entry, stream_index, 18900, buffer2.get(), kSize));
1243 EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
1244 EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));
1246 // Verify that the actual file is truncated.
1247 entry->Close();
1248 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1249 EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));
1251 // Extend the newly opened file with a zero length write, expect zero fill.
1252 EXPECT_EQ(
1254 WriteData(entry, stream_index, 20000 + kSize, buffer1.get(), 0, false));
1255 EXPECT_EQ(kSize,
1256 ReadData(entry, stream_index, 19000 + kSize, buffer1.get(), kSize));
1257 EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));
1259 entry->Close();
1262 TEST_F(DiskCacheEntryTest, SizeChanges) {
1263 InitCache();
1264 SizeChanges(1);
1267 TEST_F(DiskCacheEntryTest, SizeChangesNoBuffer) {
1268 InitCache();
1269 cache_impl_->SetFlags(disk_cache::kNoBuffering);
1270 SizeChanges(1);
1273 // Write more than the total cache capacity but to a single entry. |size| is the
1274 // amount of bytes to write each time.
1275 void DiskCacheEntryTest::ReuseEntry(int size, int stream_index) {
1276 std::string key1("the first key");
1277 disk_cache::Entry* entry;
1278 ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
1280 entry->Close();
1281 std::string key2("the second key");
1282 ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
1284 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
1285 CacheTestFillBuffer(buffer->data(), size, false);
1287 for (int i = 0; i < 15; i++) {
1288 EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer.get(), 0, true));
1289 EXPECT_EQ(size,
1290 WriteData(entry, stream_index, 0, buffer.get(), size, false));
1291 entry->Close();
1292 ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
1295 entry->Close();
1296 ASSERT_EQ(net::OK, OpenEntry(key1, &entry)) << "have not evicted this entry";
1297 entry->Close();
1300 TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
1301 SetMaxSize(200 * 1024);
1302 InitCache();
1303 ReuseEntry(20 * 1024, 0);
1306 TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
1307 SetMemoryOnlyMode();
1308 SetMaxSize(200 * 1024);
1309 InitCache();
1310 ReuseEntry(20 * 1024, 0);
1313 TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
1314 SetMaxSize(100 * 1024);
1315 InitCache();
1316 ReuseEntry(10 * 1024, 0);
1319 TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
1320 SetMemoryOnlyMode();
1321 SetMaxSize(100 * 1024);
1322 InitCache();
1323 ReuseEntry(10 * 1024, 0);
1326 // Reading somewhere that was not written should return zeros.
1327 void DiskCacheEntryTest::InvalidData(int stream_index) {
1328 std::string key("the first key");
1329 disk_cache::Entry* entry;
1330 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1332 const int kSize1 = 20000;
1333 const int kSize2 = 20000;
1334 const int kSize3 = 20000;
1335 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
1336 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
1337 scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
1339 CacheTestFillBuffer(buffer1->data(), kSize1, false);
1340 memset(buffer2->data(), 0, kSize2);
1342 // Simple data grow:
1343 EXPECT_EQ(200,
1344 WriteData(entry, stream_index, 400, buffer1.get(), 200, false));
1345 EXPECT_EQ(600, entry->GetDataSize(stream_index));
1346 EXPECT_EQ(100, ReadData(entry, stream_index, 300, buffer3.get(), 100));
1347 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
1348 entry->Close();
1349 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1351 // The entry is now on disk. Load it and extend it.
1352 EXPECT_EQ(200,
1353 WriteData(entry, stream_index, 800, buffer1.get(), 200, false));
1354 EXPECT_EQ(1000, entry->GetDataSize(stream_index));
1355 EXPECT_EQ(100, ReadData(entry, stream_index, 700, buffer3.get(), 100));
1356 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
1357 entry->Close();
1358 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1360 // This time using truncate.
1361 EXPECT_EQ(200,
1362 WriteData(entry, stream_index, 1800, buffer1.get(), 200, true));
1363 EXPECT_EQ(2000, entry->GetDataSize(stream_index));
1364 EXPECT_EQ(100, ReadData(entry, stream_index, 1500, buffer3.get(), 100));
1365 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
1367 // Go to an external file.
1368 EXPECT_EQ(200,
1369 WriteData(entry, stream_index, 19800, buffer1.get(), 200, false));
1370 EXPECT_EQ(20000, entry->GetDataSize(stream_index));
1371 EXPECT_EQ(4000, ReadData(entry, stream_index, 14000, buffer3.get(), 4000));
1372 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));
1374 // And back to an internal block.
1375 EXPECT_EQ(600,
1376 WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
1377 EXPECT_EQ(1600, entry->GetDataSize(stream_index));
1378 EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer3.get(), 600));
1379 EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));
1381 // Extend it again.
1382 EXPECT_EQ(600,
1383 WriteData(entry, stream_index, 2000, buffer1.get(), 600, false));
1384 EXPECT_EQ(2600, entry->GetDataSize(stream_index));
1385 EXPECT_EQ(200, ReadData(entry, stream_index, 1800, buffer3.get(), 200));
1386 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
1388 // And again (with truncation flag).
1389 EXPECT_EQ(600,
1390 WriteData(entry, stream_index, 3000, buffer1.get(), 600, true));
1391 EXPECT_EQ(3600, entry->GetDataSize(stream_index));
1392 EXPECT_EQ(200, ReadData(entry, stream_index, 2800, buffer3.get(), 200));
1393 EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));
1395 entry->Close();
1398 TEST_F(DiskCacheEntryTest, InvalidData) {
1399 InitCache();
1400 InvalidData(0);
1403 TEST_F(DiskCacheEntryTest, InvalidDataNoBuffer) {
1404 InitCache();
1405 cache_impl_->SetFlags(disk_cache::kNoBuffering);
1406 InvalidData(0);
1409 TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
1410 SetMemoryOnlyMode();
1411 InitCache();
1412 InvalidData(0);
1415 // Tests that the cache preserves the buffer of an IO operation.
1416 void DiskCacheEntryTest::ReadWriteDestroyBuffer(int stream_index) {
1417 std::string key("the first key");
1418 disk_cache::Entry* entry;
1419 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1421 const int kSize = 200;
1422 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1423 CacheTestFillBuffer(buffer->data(), kSize, false);
1425 net::TestCompletionCallback cb;
1426 EXPECT_EQ(net::ERR_IO_PENDING,
1427 entry->WriteData(
1428 stream_index, 0, buffer.get(), kSize, cb.callback(), false));
1430 // Release our reference to the buffer.
1431 buffer = NULL;
1432 EXPECT_EQ(kSize, cb.WaitForResult());
1434 // And now test with a Read().
1435 buffer = new net::IOBuffer(kSize);
1436 CacheTestFillBuffer(buffer->data(), kSize, false);
1438 EXPECT_EQ(
1439 net::ERR_IO_PENDING,
1440 entry->ReadData(stream_index, 0, buffer.get(), kSize, cb.callback()));
1441 buffer = NULL;
1442 EXPECT_EQ(kSize, cb.WaitForResult());
1444 entry->Close();
1447 TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
1448 InitCache();
1449 ReadWriteDestroyBuffer(0);
1452 void DiskCacheEntryTest::DoomNormalEntry() {
1453 std::string key("the first key");
1454 disk_cache::Entry* entry;
1455 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1456 entry->Doom();
1457 entry->Close();
1459 const int kSize = 20000;
1460 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1461 CacheTestFillBuffer(buffer->data(), kSize, true);
1462 buffer->data()[19999] = '\0';
1464 key = buffer->data();
1465 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1466 EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1467 EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
1468 entry->Doom();
1469 entry->Close();
1471 FlushQueueForTest();
1472 EXPECT_EQ(0, cache_->GetEntryCount());
1475 TEST_F(DiskCacheEntryTest, DoomEntry) {
1476 InitCache();
1477 DoomNormalEntry();
1480 TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
1481 SetMemoryOnlyMode();
1482 InitCache();
1483 DoomNormalEntry();
1486 // Tests dooming an entry that's linked to an open entry.
1487 void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
1488 disk_cache::Entry* entry1;
1489 disk_cache::Entry* entry2;
1490 ASSERT_EQ(net::OK, CreateEntry("fixed", &entry1));
1491 entry1->Close();
1492 ASSERT_EQ(net::OK, CreateEntry("foo", &entry1));
1493 entry1->Close();
1494 ASSERT_EQ(net::OK, CreateEntry("bar", &entry1));
1495 entry1->Close();
1497 ASSERT_EQ(net::OK, OpenEntry("foo", &entry1));
1498 ASSERT_EQ(net::OK, OpenEntry("bar", &entry2));
1499 entry2->Doom();
1500 entry2->Close();
1502 ASSERT_EQ(net::OK, OpenEntry("foo", &entry2));
1503 entry2->Doom();
1504 entry2->Close();
1505 entry1->Close();
1507 ASSERT_EQ(net::OK, OpenEntry("fixed", &entry1));
1508 entry1->Close();
1511 TEST_F(DiskCacheEntryTest, DoomEntryNextToOpenEntry) {
1512 InitCache();
1513 DoomEntryNextToOpenEntry();
1516 TEST_F(DiskCacheEntryTest, NewEvictionDoomEntryNextToOpenEntry) {
1517 SetNewEviction();
1518 InitCache();
1519 DoomEntryNextToOpenEntry();
1522 TEST_F(DiskCacheEntryTest, AppCacheDoomEntryNextToOpenEntry) {
1523 SetCacheType(net::APP_CACHE);
1524 InitCache();
1525 DoomEntryNextToOpenEntry();
1528 // Verify that basic operations work as expected with doomed entries.
1529 void DiskCacheEntryTest::DoomedEntry(int stream_index) {
1530 std::string key("the first key");
1531 disk_cache::Entry* entry;
1532 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1533 entry->Doom();
1535 FlushQueueForTest();
1536 EXPECT_EQ(0, cache_->GetEntryCount());
1537 Time initial = Time::Now();
1538 AddDelay();
1540 const int kSize1 = 2000;
1541 const int kSize2 = 2000;
1542 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
1543 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
1544 CacheTestFillBuffer(buffer1->data(), kSize1, false);
1545 memset(buffer2->data(), 0, kSize2);
1547 EXPECT_EQ(2000,
1548 WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
1549 EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
1550 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
1551 EXPECT_EQ(key, entry->GetKey());
1552 EXPECT_TRUE(initial < entry->GetLastModified());
1553 EXPECT_TRUE(initial < entry->GetLastUsed());
1555 entry->Close();
1558 TEST_F(DiskCacheEntryTest, DoomedEntry) {
1559 InitCache();
1560 DoomedEntry(0);
1563 TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
1564 SetMemoryOnlyMode();
1565 InitCache();
1566 DoomedEntry(0);
1569 // Tests that we discard entries if the data is missing.
1570 TEST_F(DiskCacheEntryTest, MissingData) {
1571 InitCache();
1573 std::string key("the first key");
1574 disk_cache::Entry* entry;
1575 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1577 // Write to an external file.
1578 const int kSize = 20000;
1579 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1580 CacheTestFillBuffer(buffer->data(), kSize, false);
1581 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1582 entry->Close();
1583 FlushQueueForTest();
1585 disk_cache::Addr address(0x80000001);
1586 base::FilePath name = cache_impl_->GetFileName(address);
1587 EXPECT_TRUE(base::DeleteFile(name, false));
1589 // Attempt to read the data.
1590 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1591 EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
1592 ReadData(entry, 0, 0, buffer.get(), kSize));
1593 entry->Close();
1595 // The entry should be gone.
1596 ASSERT_NE(net::OK, OpenEntry(key, &entry));
1599 // Test that child entries in a memory cache backend are not visible from
1600 // enumerations.
1601 TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
1602 SetMemoryOnlyMode();
1603 InitCache();
1605 const int kSize = 4096;
1606 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1607 CacheTestFillBuffer(buf->data(), kSize, false);
1609 std::string key("the first key");
1610 disk_cache::Entry* parent_entry;
1611 ASSERT_EQ(net::OK, CreateEntry(key, &parent_entry));
1613 // Writes to the parent entry.
1614 EXPECT_EQ(kSize,
1615 parent_entry->WriteSparseData(
1616 0, buf.get(), kSize, net::CompletionCallback()));
1618 // This write creates a child entry and writes to it.
1619 EXPECT_EQ(kSize,
1620 parent_entry->WriteSparseData(
1621 8192, buf.get(), kSize, net::CompletionCallback()));
1623 parent_entry->Close();
1625 // Perform the enumerations.
1626 scoped_ptr<TestIterator> iter = CreateIterator();
1627 disk_cache::Entry* entry = NULL;
1628 int count = 0;
1629 while (iter->OpenNextEntry(&entry) == net::OK) {
1630 ASSERT_TRUE(entry != NULL);
1631 ++count;
1632 disk_cache::MemEntryImpl* mem_entry =
1633 reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
1634 EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
1635 mem_entry->Close();
1637 EXPECT_EQ(1, count);
1640 // Writes |buf_1| to offset and reads it back as |buf_2|.
1641 void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
1642 net::IOBuffer* buf_1, int size, net::IOBuffer* buf_2) {
1643 net::TestCompletionCallback cb;
1645 memset(buf_2->data(), 0, size);
1646 int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1647 EXPECT_EQ(0, cb.GetResult(ret));
1649 ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
1650 EXPECT_EQ(size, cb.GetResult(ret));
1652 ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1653 EXPECT_EQ(size, cb.GetResult(ret));
1655 EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
1658 // Reads |size| bytes from |entry| at |offset| and verifies that they are the
1659 // same as the content of the provided |buffer|.
1660 void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
1661 int size) {
1662 net::TestCompletionCallback cb;
1664 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(size));
1665 memset(buf_1->data(), 0, size);
1666 int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
1667 EXPECT_EQ(size, cb.GetResult(ret));
1668 EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
1671 void DiskCacheEntryTest::BasicSparseIO() {
1672 std::string key("the first key");
1673 disk_cache::Entry* entry;
1674 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1676 const int kSize = 2048;
1677 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1678 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1679 CacheTestFillBuffer(buf_1->data(), kSize, false);
1681 // Write at offset 0.
1682 VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());
1684 // Write at offset 0x400000 (4 MB).
1685 VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());
1687 // Write at offset 0x800000000 (32 GB).
1688 VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());
1690 entry->Close();
1692 // Check everything again.
1693 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1694 VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
1695 VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
1696 VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
1697 entry->Close();
1700 TEST_F(DiskCacheEntryTest, BasicSparseIO) {
1701 InitCache();
1702 BasicSparseIO();
1705 TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseIO) {
1706 SetMemoryOnlyMode();
1707 InitCache();
1708 BasicSparseIO();
1711 void DiskCacheEntryTest::HugeSparseIO() {
1712 std::string key("the first key");
1713 disk_cache::Entry* entry;
1714 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1716 // Write 1.2 MB so that we cover multiple entries.
1717 const int kSize = 1200 * 1024;
1718 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1719 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1720 CacheTestFillBuffer(buf_1->data(), kSize, false);
1722 // Write at offset 0x20F0000 (33 MB - 64 KB).
1723 VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
1724 entry->Close();
1726 // Check it again.
1727 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1728 VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
1729 entry->Close();
1732 TEST_F(DiskCacheEntryTest, HugeSparseIO) {
1733 InitCache();
1734 HugeSparseIO();
1737 TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseIO) {
1738 SetMemoryOnlyMode();
1739 InitCache();
1740 HugeSparseIO();
1743 void DiskCacheEntryTest::GetAvailableRange() {
1744 std::string key("the first key");
1745 disk_cache::Entry* entry;
1746 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1748 const int kSize = 16 * 1024;
1749 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1750 CacheTestFillBuffer(buf->data(), kSize, false);
1752 // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
1753 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
1754 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));
1756 // We stop at the first empty block.
1757 int64 start;
1758 net::TestCompletionCallback cb;
1759 int rv = entry->GetAvailableRange(
1760 0x20F0000, kSize * 2, &start, cb.callback());
1761 EXPECT_EQ(kSize, cb.GetResult(rv));
1762 EXPECT_EQ(0x20F0000, start);
1764 start = 0;
1765 rv = entry->GetAvailableRange(0, kSize, &start, cb.callback());
1766 EXPECT_EQ(0, cb.GetResult(rv));
1767 rv = entry->GetAvailableRange(
1768 0x20F0000 - kSize, kSize, &start, cb.callback());
1769 EXPECT_EQ(0, cb.GetResult(rv));
1770 rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback());
1771 EXPECT_EQ(kSize, cb.GetResult(rv));
1772 EXPECT_EQ(0x20F0000, start);
1774 // We should be able to Read based on the results of GetAvailableRange.
1775 start = -1;
1776 rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback());
1777 EXPECT_EQ(0, cb.GetResult(rv));
1778 rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback());
1779 EXPECT_EQ(0, cb.GetResult(rv));
1781 start = 0;
1782 rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback());
1783 EXPECT_EQ(0x2000, cb.GetResult(rv));
1784 EXPECT_EQ(0x20F2000, start);
1785 EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize));
1787 // Make sure that we respect the |len| argument.
1788 start = 0;
1789 rv = entry->GetAvailableRange(
1790 0x20F0001 - kSize, kSize, &start, cb.callback());
1791 EXPECT_EQ(1, cb.GetResult(rv));
1792 EXPECT_EQ(0x20F0000, start);
1794 // Use very small ranges. Write at offset 50.
1795 const int kTinyLen = 10;
1796 EXPECT_EQ(kTinyLen, WriteSparseData(entry, 50, buf.get(), kTinyLen));
1798 start = -1;
1799 rv = entry->GetAvailableRange(kTinyLen * 2, kTinyLen, &start, cb.callback());
1800 EXPECT_EQ(0, cb.GetResult(rv));
1801 EXPECT_EQ(kTinyLen * 2, start);
1803 entry->Close();
1806 TEST_F(DiskCacheEntryTest, GetAvailableRange) {
1807 InitCache();
1808 GetAvailableRange();
1811 TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
1812 SetMemoryOnlyMode();
1813 InitCache();
1814 GetAvailableRange();
1817 // Tests that non-sequential writes that are not aligned with the minimum sparse
1818 // data granularity (1024 bytes) do in fact result in dropped data.
// Tests that sparse writes which do not reach a 1024-byte alignment boundary
// are dropped by the backend, while the portion past a crossed boundary is
// kept (at least temporarily).
TEST_F(DiskCacheEntryTest, SparseWriteDropped) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 180;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Do small writes (180 bytes) that get increasingly close to a 1024-byte
  // boundary. All data should be dropped until a boundary is crossed, at which
  // point the data after the boundary is saved (at least for a while).
  int offset = 1024 - 500;
  int rv = 0;
  net::TestCompletionCallback cb;
  int64 start;
  for (int i = 0; i < 5; i++) {
    // Check result of last GetAvailableRange.
    EXPECT_EQ(0, rv);

    rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));

    rv = entry->GetAvailableRange(offset - 100, kSize, &start, cb.callback());
    EXPECT_EQ(0, cb.GetResult(rv));

    rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
    rv = cb.GetResult(rv);
    if (!rv) {
      // Nothing was stored for this range, so reading back must yield 0.
      rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
      EXPECT_EQ(0, cb.GetResult(rv));
      rv = 0;
    }

    offset += 1024 * i + 100;
  }

  // The last write started 100 bytes below a boundary, so there should be 80
  // bytes after the boundary.
  EXPECT_EQ(80, rv);
  EXPECT_EQ(1024 * 7, start);
  rv = entry->ReadSparseData(start, buf_2.get(), kSize, cb.callback());
  EXPECT_EQ(80, cb.GetResult(rv));
  EXPECT_EQ(0, memcmp(buf_1.get()->data() + 100, buf_2.get()->data(), 80));

  // And even that part is dropped when another write changes the offset.
  offset = start;
  rv = entry->WriteSparseData(0, buf_1.get(), kSize, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));

  rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  entry->Close();
}
// Tests that small sequential writes are not dropped.
// NOTE(review): "Squential" is a long-standing typo in the test name; kept
// as-is because renaming would change test-filter matching.
TEST_F(DiskCacheEntryTest, SparseSquentialWriteNotDropped) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 180;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Any starting offset is fine as long as it is 1024-bytes aligned.
  int rv = 0;
  net::TestCompletionCallback cb;
  int64 start;
  int64 offset = 1024 * 11;
  for (; offset < 20000; offset += kSize) {
    rv = entry->WriteSparseData(offset, buf_1.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));

    rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));
    EXPECT_EQ(offset, start);

    rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
    EXPECT_EQ(kSize, cb.GetResult(rv));
    EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));
  }

  entry->Close();
  FlushQueueForTest();

  // Verify again the last write made.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  offset -= kSize;
  rv = entry->GetAvailableRange(offset, kSize, &start, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(offset, start);

  rv = entry->ReadSparseData(offset, buf_2.get(), kSize, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(0, memcmp(buf_1.get()->data(), buf_2.get()->data(), kSize));

  entry->Close();
}
// Verifies CouldBeSparse(): true for an entry written with sparse IO (both
// while open and after reopening), false for a regular entry written through
// the normal data streams.
void DiskCacheEntryTest::CouldBeSparse() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));

  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // Now verify a regular entry.
  key.assign("another key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());

  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));

  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();
}
// Runs the CouldBeSparse() checks against the default (blockfile) cache.
TEST_F(DiskCacheEntryTest, CouldBeSparse) {
  InitCache();
  CouldBeSparse();
}
// Runs the CouldBeSparse() checks against the in-memory cache.
TEST_F(DiskCacheEntryTest, MemoryCouldBeSparse) {
  SetMemoryOnlyMode();
  InitCache();
  CouldBeSparse();
}
// Exercises sparse reads/writes at offsets not aligned to the 1KB sparse
// block size, using the in-memory cache.
TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // This loop writes back to back starting from offset 0 and 9000.
  for (int i = 0; i < kSize; i += 1024) {
    scoped_refptr<net::WrappedIOBuffer> buf_3(
        new net::WrappedIOBuffer(buf_1->data() + i));
    VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
    VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
  }

  // Make sure we have data written.
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);

  // This tests a large write that spans 3 entries from a misaligned offset.
  VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());

  entry->Close();
}
// Checks GetAvailableRange() for misaligned sparse writes on the in-memory
// backend: the scan must stop at discontinuities, honor the requested length,
// and report ranges spanning multiple child blocks.
TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  disk_cache::Entry* entry;
  std::string key("the first key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Writes in the middle of an entry.
  EXPECT_EQ(
      1024,
      entry->WriteSparseData(0, buf.get(), 1024, net::CompletionCallback()));
  EXPECT_EQ(
      1024,
      entry->WriteSparseData(5120, buf.get(), 1024, net::CompletionCallback()));
  EXPECT_EQ(1024,
            entry->WriteSparseData(
                10000, buf.get(), 1024, net::CompletionCallback()));

  // Writes in the middle of an entry and spans 2 child entries.
  EXPECT_EQ(8192,
            entry->WriteSparseData(
                50000, buf.get(), 8192, net::CompletionCallback()));

  int64 start;
  net::TestCompletionCallback cb;
  // Test that we stop at a discontinuous child at the second block.
  int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(0, start);

  // Test that number of bytes is reported correctly when we start from the
  // middle of a filled region.
  rv = entry->GetAvailableRange(512, 10000, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(512, start);

  // Test that we found bytes in the child of next block.
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that the desired length is respected. It starts within a filled
  // region.
  rv = entry->GetAvailableRange(5500, 512, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(5500, start);

  // Test that the desired length is respected. It starts before a filled
  // region.
  rv = entry->GetAvailableRange(5000, 620, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that multiple blocks are scanned.
  rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback());
  EXPECT_EQ(8192, cb.GetResult(rv));
  EXPECT_EQ(50000, start);

  entry->Close();
}
// Writes sparse data in two sessions and checks the resulting entry count.
// The blockfile backend expects one extra entry (presumably the sparse
// child); memory and simple backends see exactly the two entries created.
void DiskCacheEntryTest::UpdateSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
  entry1->Close();

  // Write at offset 2048.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());

  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("the second key", &entry2));

  entry1->Close();
  entry2->Close();
  FlushQueueForTest();
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(3, cache_->GetEntryCount());
}
// Runs UpdateSparseEntry() against the blockfile cache in MEDIA_CACHE mode.
TEST_F(DiskCacheEntryTest, UpdateSparseEntry) {
  SetCacheType(net::MEDIA_CACHE);
  InitCache();
  UpdateSparseEntry();
}
// Runs UpdateSparseEntry() against the in-memory cache in MEDIA_CACHE mode.
TEST_F(DiskCacheEntryTest, MemoryOnlyUpdateSparseEntry) {
  SetMemoryOnlyMode();
  SetCacheType(net::MEDIA_CACHE);
  InitCache();
  UpdateSparseEntry();
}
// Dooms sparse entries both while still open and after being fully written,
// then verifies the cache ends up empty (all children removed too).
void DiskCacheEntryTest::DoomSparseEntry() {
  std::string key1("the first key");
  std::string key2("the second key");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry1));
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
    // Keep the second map under the default size.
    if (i < 9)
      EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));

    offset *= 4;
  }

  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(15, cache_->GetEntryCount());

  // Doom the first entry while it's still open.
  entry1->Doom();
  entry1->Close();
  entry2->Close();

  // Doom the second entry after it's fully saved.
  EXPECT_EQ(net::OK, DoomEntry(key2));

  // Make sure we do all needed work. This may fail for entry2 if between Close
  // and DoomEntry the system decides to remove all traces of the file from the
  // system cache so we don't see that there is pending IO.
  base::MessageLoop::current()->RunUntilIdle();

  if (memory_only_) {
    EXPECT_EQ(0, cache_->GetEntryCount());
  } else {
    if (5 == cache_->GetEntryCount()) {
      // Most likely we are waiting for the result of reading the sparse info
      // (it's always async on Posix so it is easy to miss). Unfortunately we
      // don't have any signal to watch for so we can only wait.
      base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
      base::MessageLoop::current()->RunUntilIdle();
    }
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}
// Runs DoomSparseEntry() against the blockfile cache on the current thread.
TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
  UseCurrentThread();
  InitCache();
  DoomSparseEntry();
}
// Runs DoomSparseEntry() against the in-memory cache.
TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomSparseEntry();
}
// A CompletionCallback wrapper that deletes the cache from within the callback.
// The way a CompletionCallback works means that all tasks (even new ones)
// are executed by the message loop before returning to the caller so the only
// way to simulate a race is to execute what we want on the callback.
class SparseTestCompletionCallback: public net::TestCompletionCallback {
 public:
  // Takes ownership of |cache| so it can be destroyed when the callback runs.
  explicit SparseTestCompletionCallback(scoped_ptr<disk_cache::Backend> cache)
      : cache_(cache.Pass()) {
  }

 private:
  // Destroys the backend before recording the result.
  void SetResult(int result) override {
    cache_.reset();
    TestCompletionCallback::SetResult(result);
  }

  scoped_ptr<disk_cache::Backend> cache_;
  DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback);
};
// Tests that we don't crash when the backend is deleted while we are working
// deleting the sub-entries of a sparse entry.
TEST_F(DiskCacheEntryTest, DoomSparseEntry2) {
  UseCurrentThread();
  InitCache();
  std::string key("the key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges.
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    offset *= 4;
  }
  EXPECT_EQ(9, cache_->GetEntryCount());

  entry->Close();
  // Keep a raw pointer; ownership of the backend moves into the callback,
  // which deletes it while DoomEntry is still cleaning up children.
  disk_cache::Backend* cache = cache_.get();
  SparseTestCompletionCallback cb(cache_.Pass());
  int rv = cache->DoomEntry(key, cb.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);
  EXPECT_EQ(net::OK, cb.WaitForResult());
}
// Exercises reads, writes and GetAvailableRange() on a sparse entry at
// offsets/sizes not aligned to the 1KB sparse block size. Expected values
// differ per backend: the blockfile backend only keeps whole aligned blocks,
// while the memory/simple backends track byte-granular ranges.
void DiskCacheEntryTest::PartialSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // We should be able to deal with IO that is not aligned to the block size
  // of a sparse entry, at least to write a big range without leaving holes.
  const int kSize = 4 * 1024;
  const int kSmallSize = 128;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // The first write is just to extend the entry. The third write occupies
  // a 1KB block partially, it may not be written internally depending on the
  // implementation.
  EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
  EXPECT_EQ(kSmallSize,
            WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buf2(new net::IOBuffer(kSize));
  memset(buf2->data(), 0, kSize);
  EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));

  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));

  // This read should not change anything.
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
  else
    EXPECT_EQ(0, ReadSparseData(entry, 24000, buf2.get(), kSize));

  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));

  int rv;
  int64 start;
  net::TestCompletionCallback cb;
  if (memory_only_ || simple_cache_mode_) {
    rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
    EXPECT_EQ(100, cb.GetResult(rv));
    EXPECT_EQ(500, start);
  } else {
    rv = entry->GetAvailableRange(0, 2048, &start, cb.callback());
    EXPECT_EQ(1024, cb.GetResult(rv));
    EXPECT_EQ(1024, start);
  }
  rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(kSize, start);
  rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(3616, cb.GetResult(rv));
  else
    EXPECT_EQ(3072, cb.GetResult(rv));

  EXPECT_EQ(20 * 1024, start);

  // 1. Query before a filled 1KB block.
  // 2. Query within a filled 1KB block.
  // 3. Query beyond a filled 1KB block.
  if (memory_only_ || simple_cache_mode_) {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3496, cb.GetResult(rv));
    EXPECT_EQ(20000, start);
  } else {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3016, cb.GetResult(rv));
    EXPECT_EQ(20480, start);
  }
  rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback());
  EXPECT_EQ(1523, cb.GetResult(rv));
  EXPECT_EQ(3073, start);
  rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  EXPECT_EQ(4600, start);

  // Now make another write and verify that there is no hole in between.
  EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv));
  EXPECT_EQ(1024, start);
  EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));

  entry->Close();
}
// Runs PartialSparseEntry() against the blockfile cache.
TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
  InitCache();
  PartialSparseEntry();
}
// Runs PartialSparseEntry() against the in-memory cache.
TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  PartialSparseEntry();
}
// Tests that corrupt sparse children are removed automatically.
TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  const int k1Meg = 1024 * 1024;
  EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  entry->Close();
  EXPECT_EQ(4, cache_->GetEntryCount());  // Parent plus three children.

  // Collect the keys of two child entries via iteration.
  scoped_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  std::string child_key[2];
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(entry != NULL);
    // Writing to an entry will alter the LRU list and invalidate the iterator.
    if (entry->GetKey() != key && count < 2)
      child_key[count++] = entry->GetKey();
    entry->Close();
  }
  for (int i = 0; i < 2; i++) {
    ASSERT_EQ(net::OK, OpenEntry(child_key[i], &entry));
    // Overwrite the header's magic and signature.
    EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
    entry->Close();
  }

  EXPECT_EQ(4, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Two children should be gone. One while reading and one while writing.
  EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));

  // We never touched this one.
  EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
  entry->Close();

  // We re-created one of the corrupt children.
  EXPECT_EQ(3, cache_->GetEntryCount());
}
// Tests cancelling an in-flight sparse write: further sparse operations are
// rejected until the entry is ready again, and all ReadyForSparseIO
// notifications eventually fire.
TEST_F(DiskCacheEntryTest, CancelSparseIO) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 40 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // This will open and write two "real" entries.
  net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
  int rv = entry->WriteSparseData(
      1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);

  int64 offset = 0;
  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  rv = cb5.GetResult(rv);
  if (!cb1.have_result()) {
    // We may or may not have finished writing to the entry. If we have not,
    // we cannot start another operation at this time.
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
  }

  // We cancel the pending operation, and register multiple notifications.
  entry->CancelSparseIO();
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb2.callback()));
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb3.callback()));
  entry->CancelSparseIO();  // Should be a no op at this point.
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb4.callback()));

  if (!cb1.have_result()) {
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->ReadSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
  }

  // Now see if we receive all notifications. Note that we should not be able
  // to write everything (unless the timing of the system is really weird).
  rv = cb1.WaitForResult();
  EXPECT_TRUE(rv == 4096 || rv == kSize);
  EXPECT_EQ(net::OK, cb2.WaitForResult());
  EXPECT_EQ(net::OK, cb3.WaitForResult());
  EXPECT_EQ(net::OK, cb4.WaitForResult());

  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  EXPECT_EQ(0, cb5.GetResult(rv));
  entry->Close();
}
// Tests that we perform sanity checks on an entry's key. Note that there are
// other tests that exercise sanity checks by using saved corrupt files.
TEST_F(DiskCacheEntryTest, KeySanityCheck) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // We have reserved space for a short key (one block), let's say that the key
  // takes more than one block, and remove the NULLs after the actual key.
  store->key_len = 800;
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}
// Runs InternalAsyncIO() against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheInternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  InternalAsyncIO();
}
// Runs ExternalAsyncIO() against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheExternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  ExternalAsyncIO();
}
// Runs ReleaseBuffer() for every stream of the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheReleaseBuffer) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReleaseBuffer(i);
  }
}
// Runs StreamAccess() against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheStreamAccess) {
  SetSimpleCacheMode();
  InitCache();
  StreamAccess();
}
// Runs GetKey() against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheGetKey) {
  SetSimpleCacheMode();
  InitCache();
  GetKey();
}
// Runs GetTimes() for every stream of the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheGetTimes) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    GetTimes(i);
  }
}
// Runs GrowData() for every stream of the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheGrowData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    GrowData(i);
  }
}
// Runs TruncateData() for every stream of the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheTruncateData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    TruncateData(i);
  }
}
// Runs ZeroLengthIO() for every stream of the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheZeroLengthIO) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ZeroLengthIO(i);
  }
}
// Runs SizeAtCreate() against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheSizeAtCreate) {
  SetSimpleCacheMode();
  InitCache();
  SizeAtCreate();
}
// Runs ReuseEntry() with a 20KB (external-sized) entry for every stream.
TEST_F(DiskCacheEntryTest, SimpleCacheReuseExternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(200 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReuseEntry(20 * 1024, i);
  }
}
// Runs ReuseEntry() with a 10KB (internal-sized) entry for every stream.
TEST_F(DiskCacheEntryTest, SimpleCacheReuseInternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(100 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReuseEntry(10 * 1024, i);
  }
}
// Runs SizeChanges() for every stream of the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheSizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    SizeChanges(i);
  }
}
// Runs InvalidData() for every stream of the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheInvalidData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    InvalidData(i);
  }
}
TEST_F(DiskCacheEntryTest, SimpleCacheReadWriteDestroyBuffer) {
  // Proving that the test works well with optimistic operations enabled is
  // subtle, instead run only in APP_CACHE mode to disable optimistic
  // operations. Stream 0 always uses optimistic operations, so the test is not
  // run on stream 0.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  for (int i = 1; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReadWriteDestroyBuffer(i);
  }
}
// Runs DoomNormalEntry() against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomNormalEntry();
}
// Runs DoomEntryNextToOpenEntry() against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntryNextToOpenEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomEntryNextToOpenEntry();
}
TEST_F(DiskCacheEntryTest, SimpleCacheDoomedEntry) {
  SetSimpleCacheMode();
  InitCache();
  // Stream 2 is excluded because the implementation does not support writing to
  // it on a doomed entry, if it was previously lazily omitted.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount - 1; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    DoomedEntry(i);
  }
}
// Creates an entry with corrupted last byte in stream 0.
// Requires SimpleCacheMode.
// Returns false if the entry or its backing file cannot be created/opened;
// on success, |*data_size| receives the size of the data written to stream 1.
bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const std::string& key,
                                                         int* data_size) {
  disk_cache::Entry* entry = NULL;

  if (CreateEntry(key, &entry) != net::OK || !entry) {
    LOG(ERROR) << "Could not create entry";
    return false;
  }

  const char data[] = "this is very good data";
  const int kDataSize = arraysize(data);  // Includes the trailing NUL.
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
  base::strlcpy(buffer->data(), data, kDataSize);

  EXPECT_EQ(kDataSize, WriteData(entry, 1, 0, buffer.get(), kDataSize, false));
  entry->Close();
  entry = NULL;

  // Corrupt the last byte of the data.
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  base::File entry_file0(entry_file0_path,
                         base::File::FLAG_WRITE | base::File::FLAG_OPEN);
  if (!entry_file0.IsValid())
    return false;

  int64 file_offset =
      sizeof(disk_cache::SimpleFileHeader) + key.size() + kDataSize - 2;
  EXPECT_EQ(1, entry_file0.Write(file_offset, "X", 1));
  *data_size = kDataSize;
  return true;
}
// Tests that the simple cache can detect entries that have bad data.
TEST_F(DiskCacheEntryTest, SimpleCacheBadChecksum) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));

  disk_cache::Entry* entry = NULL;

  // Open the entry.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
}
// Tests that an entry that has had an IO error occur can still be Doomed().
TEST_F(DiskCacheEntryTest, SimpleCacheErrorThenDoom) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));

  disk_cache::Entry* entry = NULL;

  // Open the entry, forcing an IO error.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));

  entry->Doom();  // Should not crash.
}
2693 bool TruncatePath(const base::FilePath& file_path, int64 length) {
2694 base::File file(file_path, base::File::FLAG_WRITE | base::File::FLAG_OPEN);
2695 if (!file.IsValid())
2696 return false;
2697 return file.SetLength(length);
// Tests that an entry file truncated too short to contain its EOF record
// fails to open.
TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  EXPECT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // Force the entry to flush to disk, so subsequent platform file operations
  // succeed.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  entry->Close();
  entry = NULL;

  // Truncate the file such that the length isn't sufficient to have an EOF
  // record.
  int kTruncationBytes = -implicit_cast<int>(sizeof(disk_cache::SimpleFileEOF));
  const base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  const int64 invalid_size =
      disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
                                                             kTruncationBytes);
  EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
  EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
  DisableIntegrityCheck();
}
TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsBasic) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = NULL;

  disk_cache::Entry* entry = NULL;
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  EXPECT_EQ(
      write_buffer->size(),
      WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));

  scoped_refptr<net::IOBufferWithSize> read_buffer(
      new net::IOBufferWithSize(kBufferSize));
  EXPECT_EQ(read_buffer->size(),
            ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
}
TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsDontBlock) {
  // Test sequence:
  // Create, Write, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = NULL;

  MessageLoopHelper helper;
  CallbackTest create_callback(&helper, false);

  int expected_callback_runs = 0;
  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));

  disk_cache::Entry* entry = NULL;
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  // NOTE(review): the stream-index (1) and offset (0) argument lines were
  // missing from the extracted source; reconstructed from the WriteData
  // signature used by the sibling tests — confirm against upstream.
  int ret = entry->WriteData(
      1,
      0,
      write_buffer.get(),
      write_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  ASSERT_EQ(net::ERR_IO_PENDING, ret);
  helper.WaitUntilCacheIoFinished(++expected_callback_runs);
}
TEST_F(DiskCacheEntryTest,
       SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = NULL;
  MessageLoopHelper helper;

  disk_cache::Entry* entry = NULL;
  // Note that |entry| is only set once CreateEntry() completed which is why we
  // have to wait (i.e. use the helper CreateEntry() function).
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  // NOTE(review): stream-index/offset argument lines (1, 0) were missing from
  // the extracted source here and in ReadData/memcmp below; reconstructed
  // from the signatures used by the sibling tests — confirm against upstream.
  int ret = entry->WriteData(
      1,
      0,
      write_buffer.get(),
      write_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  EXPECT_EQ(net::ERR_IO_PENDING, ret);
  int expected_callback_runs = 1;

  scoped_refptr<net::IOBufferWithSize> read_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CallbackTest read_callback(&helper, false);
  ret = entry->ReadData(
      1,
      0,
      read_buffer.get(),
      read_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&read_callback)));
  EXPECT_EQ(net::ERR_IO_PENDING, ret);
  ++expected_callback_runs;

  helper.WaitUntilCacheIoFinished(expected_callback_runs);
  ASSERT_EQ(read_buffer->size(), write_buffer->size());
  EXPECT_EQ(
      0,
      memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
}
2844 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
2845 // Test sequence:
2846 // Create, Write, Read, Write, Read, Close.
2847 SetSimpleCacheMode();
2848 InitCache();
2849 disk_cache::Entry* null = NULL;
2850 const char key[] = "the first key";
2852 MessageLoopHelper helper;
2853 CallbackTest callback1(&helper, false);
2854 CallbackTest callback2(&helper, false);
2855 CallbackTest callback3(&helper, false);
2856 CallbackTest callback4(&helper, false);
2857 CallbackTest callback5(&helper, false);
2859 int expected = 0;
2860 const int kSize1 = 10;
2861 const int kSize2 = 20;
2862 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2863 scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
2864 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
2865 scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
2866 CacheTestFillBuffer(buffer1->data(), kSize1, false);
2867 CacheTestFillBuffer(buffer2->data(), kSize2, false);
2869 disk_cache::Entry* entry = NULL;
2870 // Create is optimistic, must return OK.
2871 ASSERT_EQ(net::OK,
2872 cache_->CreateEntry(key, &entry,
2873 base::Bind(&CallbackTest::Run,
2874 base::Unretained(&callback1))));
2875 EXPECT_NE(null, entry);
2876 ScopedEntryPtr entry_closer(entry);
2878 // This write may or may not be optimistic (it depends if the previous
2879 // optimistic create already finished by the time we call the write here).
2880 int ret = entry->WriteData(
2883 buffer1.get(),
2884 kSize1,
2885 base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
2886 false);
2887 EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
2888 if (net::ERR_IO_PENDING == ret)
2889 expected++;
2891 // This Read must not be optimistic, since we don't support that yet.
2892 EXPECT_EQ(net::ERR_IO_PENDING,
2893 entry->ReadData(
2896 buffer1_read.get(),
2897 kSize1,
2898 base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
2899 expected++;
2900 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
2901 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
2903 // At this point after waiting, the pending operations queue on the entry
2904 // should be empty, so the next Write operation must run as optimistic.
2905 EXPECT_EQ(kSize2,
2906 entry->WriteData(
2909 buffer2.get(),
2910 kSize2,
2911 base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
2912 false));
2914 // Lets do another read so we block until both the write and the read
2915 // operation finishes and we can then test for HasOneRef() below.
2916 EXPECT_EQ(net::ERR_IO_PENDING,
2917 entry->ReadData(
2920 buffer2_read.get(),
2921 kSize2,
2922 base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
2923 expected++;
2925 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
2926 EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));
2928 // Check that we are not leaking.
2929 EXPECT_NE(entry, null);
2930 EXPECT_TRUE(
2931 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2934 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
2935 // Test sequence:
2936 // Create, Open, Close, Close.
2937 SetSimpleCacheMode();
2938 InitCache();
2939 disk_cache::Entry* null = NULL;
2940 const char key[] = "the first key";
2942 MessageLoopHelper helper;
2943 CallbackTest callback1(&helper, false);
2944 CallbackTest callback2(&helper, false);
2946 disk_cache::Entry* entry = NULL;
2947 ASSERT_EQ(net::OK,
2948 cache_->CreateEntry(key, &entry,
2949 base::Bind(&CallbackTest::Run,
2950 base::Unretained(&callback1))));
2951 EXPECT_NE(null, entry);
2952 ScopedEntryPtr entry_closer(entry);
2954 disk_cache::Entry* entry2 = NULL;
2955 ASSERT_EQ(net::ERR_IO_PENDING,
2956 cache_->OpenEntry(key, &entry2,
2957 base::Bind(&CallbackTest::Run,
2958 base::Unretained(&callback2))));
2959 ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));
2961 EXPECT_NE(null, entry2);
2962 EXPECT_EQ(entry, entry2);
2964 // We have to call close twice, since we called create and open above.
2965 entry->Close();
2967 // Check that we are not leaking.
2968 EXPECT_TRUE(
2969 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2972 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
2973 // Test sequence:
2974 // Create, Close, Open, Close.
2975 SetSimpleCacheMode();
2976 InitCache();
2977 disk_cache::Entry* null = NULL;
2978 const char key[] = "the first key";
2980 disk_cache::Entry* entry = NULL;
2981 ASSERT_EQ(net::OK,
2982 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2983 EXPECT_NE(null, entry);
2984 entry->Close();
2986 net::TestCompletionCallback cb;
2987 disk_cache::Entry* entry2 = NULL;
2988 ASSERT_EQ(net::ERR_IO_PENDING,
2989 cache_->OpenEntry(key, &entry2, cb.callback()));
2990 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
2991 ScopedEntryPtr entry_closer(entry2);
2993 EXPECT_NE(null, entry2);
2994 EXPECT_EQ(entry, entry2);
2996 // Check that we are not leaking.
2997 EXPECT_TRUE(
2998 static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
3001 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
3002 // Test sequence:
3003 // Create, Close, Write, Open, Open, Close, Write, Read, Close.
3004 SetSimpleCacheMode();
3005 InitCache();
3006 disk_cache::Entry* null = NULL;
3007 const char key[] = "the first key";
3009 net::TestCompletionCallback cb;
3010 const int kSize1 = 10;
3011 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
3012 CacheTestFillBuffer(buffer1->data(), kSize1, false);
3013 disk_cache::Entry* entry = NULL;
3015 ASSERT_EQ(net::OK,
3016 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3017 EXPECT_NE(null, entry);
3018 entry->Close();
3020 // Lets do a Write so we block until both the Close and the Write
3021 // operation finishes. Write must fail since we are writing in a closed entry.
3022 EXPECT_EQ(
3023 net::ERR_IO_PENDING,
3024 entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
3025 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));
3027 // Finish running the pending tasks so that we fully complete the close
3028 // operation and destroy the entry object.
3029 base::MessageLoop::current()->RunUntilIdle();
3031 // At this point the |entry| must have been destroyed, and called
3032 // RemoveSelfFromBackend().
3033 disk_cache::Entry* entry2 = NULL;
3034 ASSERT_EQ(net::ERR_IO_PENDING,
3035 cache_->OpenEntry(key, &entry2, cb.callback()));
3036 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
3037 EXPECT_NE(null, entry2);
3039 disk_cache::Entry* entry3 = NULL;
3040 ASSERT_EQ(net::ERR_IO_PENDING,
3041 cache_->OpenEntry(key, &entry3, cb.callback()));
3042 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
3043 EXPECT_NE(null, entry3);
3044 EXPECT_EQ(entry2, entry3);
3045 entry3->Close();
3047 // The previous Close doesn't actually closes the entry since we opened it
3048 // twice, so the next Write operation must succeed and it must be able to
3049 // perform it optimistically, since there is no operation running on this
3050 // entry.
3051 EXPECT_EQ(kSize1,
3052 entry2->WriteData(
3053 1, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
3055 // Lets do another read so we block until both the write and the read
3056 // operation finishes and we can then test for HasOneRef() below.
3057 EXPECT_EQ(net::ERR_IO_PENDING,
3058 entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
3059 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3061 // Check that we are not leaking.
3062 EXPECT_TRUE(
3063 static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
3064 entry2->Close();
3067 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic5) {
3068 // Test sequence:
3069 // Create, Doom, Write, Read, Close.
3070 SetSimpleCacheMode();
3071 InitCache();
3072 disk_cache::Entry* null = NULL;
3073 const char key[] = "the first key";
3075 net::TestCompletionCallback cb;
3076 const int kSize1 = 10;
3077 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
3078 CacheTestFillBuffer(buffer1->data(), kSize1, false);
3079 disk_cache::Entry* entry = NULL;
3081 ASSERT_EQ(net::OK,
3082 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3083 EXPECT_NE(null, entry);
3084 ScopedEntryPtr entry_closer(entry);
3085 entry->Doom();
3087 EXPECT_EQ(
3088 net::ERR_IO_PENDING,
3089 entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
3090 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3092 EXPECT_EQ(net::ERR_IO_PENDING,
3093 entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
3094 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3096 // Check that we are not leaking.
3097 EXPECT_TRUE(
3098 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3101 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
3102 // Test sequence:
3103 // Create, Write, Doom, Doom, Read, Doom, Close.
3104 SetSimpleCacheMode();
3105 InitCache();
3106 disk_cache::Entry* null = NULL;
3107 const char key[] = "the first key";
3109 net::TestCompletionCallback cb;
3110 const int kSize1 = 10;
3111 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
3112 scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
3113 CacheTestFillBuffer(buffer1->data(), kSize1, false);
3114 disk_cache::Entry* entry = NULL;
3116 ASSERT_EQ(net::OK,
3117 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3118 EXPECT_NE(null, entry);
3119 ScopedEntryPtr entry_closer(entry);
3121 EXPECT_EQ(
3122 net::ERR_IO_PENDING,
3123 entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
3124 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3126 entry->Doom();
3127 entry->Doom();
3129 // This Read must not be optimistic, since we don't support that yet.
3130 EXPECT_EQ(net::ERR_IO_PENDING,
3131 entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
3132 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3133 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
3135 entry->Doom();
3138 // Confirm that IO buffers are not referenced by the Simple Cache after a write
3139 // completes.
3140 TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReleases) {
3141 SetSimpleCacheMode();
3142 InitCache();
3144 const char key[] = "the first key";
3145 disk_cache::Entry* entry = NULL;
3147 // First, an optimistic create.
3148 ASSERT_EQ(net::OK,
3149 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3150 ASSERT_TRUE(entry);
3151 ScopedEntryPtr entry_closer(entry);
3153 const int kWriteSize = 512;
3154 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kWriteSize));
3155 EXPECT_TRUE(buffer1->HasOneRef());
3156 CacheTestFillBuffer(buffer1->data(), kWriteSize, false);
3158 // An optimistic write happens only when there is an empty queue of pending
3159 // operations. To ensure the queue is empty, we issue a write and wait until
3160 // it completes.
3161 EXPECT_EQ(kWriteSize,
3162 WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
3163 EXPECT_TRUE(buffer1->HasOneRef());
3165 // Finally, we should perform an optimistic write and confirm that all
3166 // references to the IO buffer have been released.
3167 EXPECT_EQ(
3168 kWriteSize,
3169 entry->WriteData(
3170 1, 0, buffer1.get(), kWriteSize, net::CompletionCallback(), false));
3171 EXPECT_TRUE(buffer1->HasOneRef());
3174 TEST_F(DiskCacheEntryTest, SimpleCacheCreateDoomRace) {
3175 // Test sequence:
3176 // Create, Doom, Write, Close, Check files are not on disk anymore.
3177 SetSimpleCacheMode();
3178 InitCache();
3179 disk_cache::Entry* null = NULL;
3180 const char key[] = "the first key";
3182 net::TestCompletionCallback cb;
3183 const int kSize1 = 10;
3184 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
3185 CacheTestFillBuffer(buffer1->data(), kSize1, false);
3186 disk_cache::Entry* entry = NULL;
3188 ASSERT_EQ(net::OK,
3189 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3190 EXPECT_NE(null, entry);
3192 EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomEntry(key, cb.callback()));
3193 EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
3195 EXPECT_EQ(
3196 kSize1,
3197 entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
3199 entry->Close();
3201 // Finish running the pending tasks so that we fully complete the close
3202 // operation and destroy the entry object.
3203 base::MessageLoop::current()->RunUntilIdle();
3205 for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
3206 base::FilePath entry_file_path = cache_path_.AppendASCII(
3207 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
3208 base::File::Info info;
3209 EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
3213 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateRace) {
3214 // This test runs as APP_CACHE to make operations more synchronous. Test
3215 // sequence:
3216 // Create, Doom, Create.
3217 SetCacheType(net::APP_CACHE);
3218 SetSimpleCacheMode();
3219 InitCache();
3220 disk_cache::Entry* null = NULL;
3221 const char key[] = "the first key";
3223 net::TestCompletionCallback create_callback;
3225 disk_cache::Entry* entry1 = NULL;
3226 ASSERT_EQ(net::OK,
3227 create_callback.GetResult(
3228 cache_->CreateEntry(key, &entry1, create_callback.callback())));
3229 ScopedEntryPtr entry1_closer(entry1);
3230 EXPECT_NE(null, entry1);
3232 net::TestCompletionCallback doom_callback;
3233 EXPECT_EQ(net::ERR_IO_PENDING,
3234 cache_->DoomEntry(key, doom_callback.callback()));
3236 disk_cache::Entry* entry2 = NULL;
3237 ASSERT_EQ(net::OK,
3238 create_callback.GetResult(
3239 cache_->CreateEntry(key, &entry2, create_callback.callback())));
3240 ScopedEntryPtr entry2_closer(entry2);
3241 EXPECT_EQ(net::OK, doom_callback.GetResult(net::ERR_IO_PENDING));
3244 TEST_F(DiskCacheEntryTest, SimpleCacheDoomDoom) {
3245 // Test sequence:
3246 // Create, Doom, Create, Doom (1st entry), Open.
3247 SetSimpleCacheMode();
3248 InitCache();
3249 disk_cache::Entry* null = NULL;
3251 const char key[] = "the first key";
3253 disk_cache::Entry* entry1 = NULL;
3254 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3255 ScopedEntryPtr entry1_closer(entry1);
3256 EXPECT_NE(null, entry1);
3258 EXPECT_EQ(net::OK, DoomEntry(key));
3260 disk_cache::Entry* entry2 = NULL;
3261 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3262 ScopedEntryPtr entry2_closer(entry2);
3263 EXPECT_NE(null, entry2);
3265 // Redundantly dooming entry1 should not delete entry2.
3266 disk_cache::SimpleEntryImpl* simple_entry1 =
3267 static_cast<disk_cache::SimpleEntryImpl*>(entry1);
3268 net::TestCompletionCallback cb;
3269 EXPECT_EQ(net::OK,
3270 cb.GetResult(simple_entry1->DoomEntry(cb.callback())));
3272 disk_cache::Entry* entry3 = NULL;
3273 ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
3274 ScopedEntryPtr entry3_closer(entry3);
3275 EXPECT_NE(null, entry3);
3278 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateDoom) {
3279 // Test sequence:
3280 // Create, Doom, Create, Doom.
3281 SetSimpleCacheMode();
3282 InitCache();
3284 disk_cache::Entry* null = NULL;
3286 const char key[] = "the first key";
3288 disk_cache::Entry* entry1 = NULL;
3289 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3290 ScopedEntryPtr entry1_closer(entry1);
3291 EXPECT_NE(null, entry1);
3293 entry1->Doom();
3295 disk_cache::Entry* entry2 = NULL;
3296 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3297 ScopedEntryPtr entry2_closer(entry2);
3298 EXPECT_NE(null, entry2);
3300 entry2->Doom();
3302 // This test passes if it doesn't crash.
3305 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCloseCreateCloseOpen) {
3306 // Test sequence: Create, Doom, Close, Create, Close, Open.
3307 SetSimpleCacheMode();
3308 InitCache();
3310 disk_cache::Entry* null = NULL;
3312 const char key[] = "this is a key";
3314 disk_cache::Entry* entry1 = NULL;
3315 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3316 ScopedEntryPtr entry1_closer(entry1);
3317 EXPECT_NE(null, entry1);
3319 entry1->Doom();
3320 entry1_closer.reset();
3321 entry1 = NULL;
3323 disk_cache::Entry* entry2 = NULL;
3324 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3325 ScopedEntryPtr entry2_closer(entry2);
3326 EXPECT_NE(null, entry2);
3328 entry2_closer.reset();
3329 entry2 = NULL;
3331 disk_cache::Entry* entry3 = NULL;
3332 ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
3333 ScopedEntryPtr entry3_closer(entry3);
3334 EXPECT_NE(null, entry3);
3337 // Checks that an optimistic Create would fail later on a racing Open.
3338 TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticCreateFailsOnOpen) {
3339 SetSimpleCacheMode();
3340 InitCache();
3342 // Create a corrupt file in place of a future entry. Optimistic create should
3343 // initially succeed, but realize later that creation failed.
3344 const std::string key = "the key";
3345 net::TestCompletionCallback cb;
3346 disk_cache::Entry* entry = NULL;
3347 disk_cache::Entry* entry2 = NULL;
3349 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3350 key, cache_path_));
3351 EXPECT_EQ(net::OK, cache_->CreateEntry(key, &entry, cb.callback()));
3352 ASSERT_TRUE(entry);
3353 ScopedEntryPtr entry_closer(entry);
3354 ASSERT_NE(net::OK, OpenEntry(key, &entry2));
3356 // Check that we are not leaking.
3357 EXPECT_TRUE(
3358 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3360 DisableIntegrityCheck();
3363 // Tests that old entries are evicted while new entries remain in the index.
3364 // This test relies on non-mandatory properties of the simple Cache Backend:
3365 // LRU eviction, specific values of high-watermark and low-watermark etc.
3366 // When changing the eviction algorithm, the test will have to be re-engineered.
3367 TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
3368 const int kMaxSize = 200 * 1024;
3369 const int kWriteSize = kMaxSize / 10;
3370 const int kNumExtraEntries = 12;
3371 SetSimpleCacheMode();
3372 SetMaxSize(kMaxSize);
3373 InitCache();
3375 std::string key1("the first key");
3376 disk_cache::Entry* entry;
3377 ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
3378 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kWriteSize));
3379 CacheTestFillBuffer(buffer->data(), kWriteSize, false);
3380 EXPECT_EQ(kWriteSize,
3381 WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
3382 entry->Close();
3383 AddDelay();
3385 std::string key2("the key prefix");
3386 for (int i = 0; i < kNumExtraEntries; i++) {
3387 if (i == kNumExtraEntries - 2) {
3388 // Create a distinct timestamp for the last two entries. These entries
3389 // will be checked for outliving the eviction.
3390 AddDelay();
3392 ASSERT_EQ(net::OK, CreateEntry(key2 + base::IntToString(i), &entry));
3393 ScopedEntryPtr entry_closer(entry);
3394 EXPECT_EQ(kWriteSize,
3395 WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
3398 // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
3399 // the internal knowledge about |SimpleBackendImpl|.
3400 ASSERT_NE(net::OK, OpenEntry(key1, &entry))
3401 << "Should have evicted the old entry";
3402 for (int i = 0; i < 2; i++) {
3403 int entry_no = kNumExtraEntries - i - 1;
3404 // Generally there is no guarantee that at this point the backround eviction
3405 // is finished. We are testing the positive case, i.e. when the eviction
3406 // never reaches this entry, should be non-flaky.
3407 ASSERT_EQ(net::OK, OpenEntry(key2 + base::IntToString(entry_no), &entry))
3408 << "Should not have evicted fresh entry " << entry_no;
3409 entry->Close();
3413 // Tests that if a read and a following in-flight truncate are both in progress
3414 // simultaniously that they both can occur successfully. See
3415 // http://crbug.com/239223
3416 TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate) {
3417 SetSimpleCacheMode();
3418 InitCache();
3420 const char key[] = "the first key";
3422 const int kBufferSize = 1024;
3423 scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
3424 CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
3426 disk_cache::Entry* entry = NULL;
3427 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3429 EXPECT_EQ(kBufferSize,
3430 WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
3431 entry->Close();
3432 entry = NULL;
3434 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3435 ScopedEntryPtr entry_closer(entry);
3437 MessageLoopHelper helper;
3438 int expected = 0;
3440 // Make a short read.
3441 const int kReadBufferSize = 512;
3442 scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
3443 CallbackTest read_callback(&helper, false);
3444 EXPECT_EQ(net::ERR_IO_PENDING,
3445 entry->ReadData(1,
3447 read_buffer.get(),
3448 kReadBufferSize,
3449 base::Bind(&CallbackTest::Run,
3450 base::Unretained(&read_callback))));
3451 ++expected;
3453 // Truncate the entry to the length of that read.
3454 scoped_refptr<net::IOBuffer>
3455 truncate_buffer(new net::IOBuffer(kReadBufferSize));
3456 CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
3457 CallbackTest truncate_callback(&helper, false);
3458 EXPECT_EQ(net::ERR_IO_PENDING,
3459 entry->WriteData(1,
3461 truncate_buffer.get(),
3462 kReadBufferSize,
3463 base::Bind(&CallbackTest::Run,
3464 base::Unretained(&truncate_callback)),
3465 true));
3466 ++expected;
3468 // Wait for both the read and truncation to finish, and confirm that both
3469 // succeeded.
3470 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
3471 EXPECT_EQ(kReadBufferSize, read_callback.last_result());
3472 EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
3473 EXPECT_EQ(0,
3474 memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
3477 // Tests that if a write and a read dependant on it are both in flight
3478 // simultaneiously that they both can complete successfully without erroneous
3479 // early returns. See http://crbug.com/239223
3480 TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
3481 SetSimpleCacheMode();
3482 InitCache();
3484 const char key[] = "the first key";
3485 disk_cache::Entry* entry = NULL;
3486 ASSERT_EQ(net::OK,
3487 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3488 ScopedEntryPtr entry_closer(entry);
3490 const int kBufferSize = 1024;
3491 scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
3492 CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
3494 MessageLoopHelper helper;
3495 int expected = 0;
3497 CallbackTest write_callback(&helper, false);
3498 EXPECT_EQ(net::ERR_IO_PENDING,
3499 entry->WriteData(1,
3501 write_buffer.get(),
3502 kBufferSize,
3503 base::Bind(&CallbackTest::Run,
3504 base::Unretained(&write_callback)),
3505 true));
3506 ++expected;
3508 scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
3509 CallbackTest read_callback(&helper, false);
3510 EXPECT_EQ(net::ERR_IO_PENDING,
3511 entry->ReadData(1,
3513 read_buffer.get(),
3514 kBufferSize,
3515 base::Bind(&CallbackTest::Run,
3516 base::Unretained(&read_callback))));
3517 ++expected;
3519 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
3520 EXPECT_EQ(kBufferSize, write_callback.last_result());
3521 EXPECT_EQ(kBufferSize, read_callback.last_result());
3522 EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
3525 TEST_F(DiskCacheEntryTest, SimpleCacheOpenCreateRaceWithNoIndex) {
3526 SetSimpleCacheMode();
3527 DisableSimpleCacheWaitForIndex();
3528 DisableIntegrityCheck();
3529 InitCache();
3531 // Assume the index is not initialized, which is likely, since we are blocking
3532 // the IO thread from executing the index finalization step.
3533 disk_cache::Entry* entry1;
3534 net::TestCompletionCallback cb1;
3535 disk_cache::Entry* entry2;
3536 net::TestCompletionCallback cb2;
3537 int rv1 = cache_->OpenEntry("key", &entry1, cb1.callback());
3538 int rv2 = cache_->CreateEntry("key", &entry2, cb2.callback());
3540 EXPECT_EQ(net::ERR_FAILED, cb1.GetResult(rv1));
3541 ASSERT_EQ(net::OK, cb2.GetResult(rv2));
3542 entry2->Close();
3545 // Checking one more scenario of overlapped reading of a bad entry.
3546 // Differs from the |SimpleCacheMultipleReadersCheckCRC| only by the order of
3547 // last two reads.
3548 TEST_F(DiskCacheEntryTest, SimpleCacheMultipleReadersCheckCRC2) {
3549 SetSimpleCacheMode();
3550 InitCache();
3552 const char key[] = "key";
3553 int size;
3554 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
3556 scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
3557 scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
3559 // Advance the first reader a little.
3560 disk_cache::Entry* entry = NULL;
3561 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3562 ScopedEntryPtr entry_closer(entry);
3563 EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));
3565 // Advance the 2nd reader by the same amount.
3566 disk_cache::Entry* entry2 = NULL;
3567 EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
3568 ScopedEntryPtr entry2_closer(entry2);
3569 EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));
3571 // Continue reading 1st.
3572 EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));
3574 // This read should fail as well because we have previous read failures.
3575 EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
3576 DisableIntegrityCheck();
3579 // Test if we can sequentially read each subset of the data until all the data
3580 // is read, then the CRC is calculated correctly and the reads are successful.
3581 TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRC) {
3582 // Test sequence:
3583 // Create, Write, Read (first half of data), Read (second half of data),
3584 // Close.
3585 SetSimpleCacheMode();
3586 InitCache();
3587 disk_cache::Entry* null = NULL;
3588 const char key[] = "the first key";
3590 const int kHalfSize = 200;
3591 const int kSize = 2 * kHalfSize;
3592 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3593 CacheTestFillBuffer(buffer1->data(), kSize, false);
3594 disk_cache::Entry* entry = NULL;
3596 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3597 EXPECT_NE(null, entry);
3599 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
3600 entry->Close();
3602 disk_cache::Entry* entry2 = NULL;
3603 ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
3604 EXPECT_EQ(entry, entry2);
3606 // Read the first half of the data.
3607 int offset = 0;
3608 int buf_len = kHalfSize;
3609 scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(buf_len));
3610 EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
3611 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));
3613 // Read the second half of the data.
3614 offset = buf_len;
3615 buf_len = kHalfSize;
3616 scoped_refptr<net::IOBuffer> buffer1_read2(new net::IOBuffer(buf_len));
3617 EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
3618 char* buffer1_data = buffer1->data() + offset;
3619 EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));
3621 // Check that we are not leaking.
3622 EXPECT_NE(entry, null);
3623 EXPECT_TRUE(
3624 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3625 entry->Close();
3626 entry = NULL;
3629 // Test if we can write the data not in sequence and read correctly. In
3630 // this case the CRC will not be present.
3631 TEST_F(DiskCacheEntryTest, SimpleCacheNonSequentialWrite) {
3632 // Test sequence:
3633 // Create, Write (second half of data), Write (first half of data), Read,
3634 // Close.
3635 SetSimpleCacheMode();
3636 InitCache();
3637 disk_cache::Entry* null = NULL;
3638 const char key[] = "the first key";
3640 const int kHalfSize = 200;
3641 const int kSize = 2 * kHalfSize;
3642 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3643 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3644 CacheTestFillBuffer(buffer1->data(), kSize, false);
3645 char* buffer1_data = buffer1->data() + kHalfSize;
3646 memcpy(buffer2->data(), buffer1_data, kHalfSize);
3648 disk_cache::Entry* entry = NULL;
3649 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3650 entry->Close();
3651 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3652 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3653 EXPECT_NE(null, entry);
3655 int offset = kHalfSize;
3656 int buf_len = kHalfSize;
3658 EXPECT_EQ(buf_len,
3659 WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3660 offset = 0;
3661 buf_len = kHalfSize;
3662 EXPECT_EQ(buf_len,
3663 WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3664 entry->Close();
3666 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3668 scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
3669 EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
3670 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));
3671 // Check that we are not leaking.
3672 ASSERT_NE(entry, null);
3673 EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3674 entry->Close();
3678 // Test that changing stream1 size does not affect stream0 (stream0 and stream1
3679 // are stored in the same file in Simple Cache).
3680 TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
3681 SetSimpleCacheMode();
3682 InitCache();
3683 disk_cache::Entry* entry = NULL;
3684 const char key[] = "the key";
3685 const int kSize = 100;
3686 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3687 scoped_refptr<net::IOBuffer> buffer_read(new net::IOBuffer(kSize));
3688 CacheTestFillBuffer(buffer->data(), kSize, false);
3690 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3691 EXPECT_TRUE(entry);
3693 // Write something into stream0.
3694 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
3695 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
3696 EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
3697 entry->Close();
3699 // Extend stream1.
3700 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3701 int stream1_size = 100;
3702 EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false));
3703 EXPECT_EQ(stream1_size, entry->GetDataSize(1));
3704 entry->Close();
3706 // Check that stream0 data has not been modified and that the EOF record for
3707 // stream 0 contains a crc.
3708 // The entry needs to be reopened before checking the crc: Open will perform
3709 // the synchronization with the previous Close. This ensures the EOF records
3710 // have been written to disk before we attempt to read them independently.
3711 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3712 base::FilePath entry_file0_path = cache_path_.AppendASCII(
3713 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3714 base::File entry_file0(entry_file0_path,
3715 base::File::FLAG_READ | base::File::FLAG_OPEN);
3716 ASSERT_TRUE(entry_file0.IsValid());
3718 int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
3719 int sparse_data_size = 0;
3720 disk_cache::SimpleEntryStat entry_stat(
3721 base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
3722 int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
3723 disk_cache::SimpleFileEOF eof_record;
3724 ASSERT_EQ(static_cast<int>(sizeof(eof_record)),
3725 entry_file0.Read(eof_offset, reinterpret_cast<char*>(&eof_record),
3726 sizeof(eof_record)));
3727 EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number);
3728 EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) ==
3729 disk_cache::SimpleFileEOF::FLAG_HAS_CRC32);
3731 buffer_read = new net::IOBuffer(kSize);
3732 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
3733 EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
3735 // Shrink stream1.
3736 stream1_size = 50;
3737 EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true));
3738 EXPECT_EQ(stream1_size, entry->GetDataSize(1));
3739 entry->Close();
3741 // Check that stream0 data has not been modified.
3742 buffer_read = new net::IOBuffer(kSize);
3743 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3744 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
3745 EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
3746 entry->Close();
3747 entry = NULL;
3750 // Test that writing within the range for which the crc has already been
3751 // computed will properly invalidate the computed crc.
3752 TEST_F(DiskCacheEntryTest, SimpleCacheCRCRewrite) {
3753 // Test sequence:
3754 // Create, Write (big data), Write (small data in the middle), Close.
3755 // Open, Read (all), Close.
3756 SetSimpleCacheMode();
3757 InitCache();
3758 disk_cache::Entry* null = NULL;
3759 const char key[] = "the first key";
3761 const int kHalfSize = 200;
3762 const int kSize = 2 * kHalfSize;
3763 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3764 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kHalfSize));
3765 CacheTestFillBuffer(buffer1->data(), kSize, false);
3766 CacheTestFillBuffer(buffer2->data(), kHalfSize, false);
3768 disk_cache::Entry* entry = NULL;
3769 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3770 EXPECT_NE(null, entry);
3771 entry->Close();
3773 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3774 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3775 int offset = 0;
3776 int buf_len = kSize;
3778 EXPECT_EQ(buf_len,
3779 WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3780 offset = kHalfSize;
3781 buf_len = kHalfSize;
3782 EXPECT_EQ(buf_len,
3783 WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3784 entry->Close();
3786 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3788 scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
3789 EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
3790 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
3791 EXPECT_EQ(
3793 memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize, kHalfSize));
3795 entry->Close();
3799 bool DiskCacheEntryTest::SimpleCacheThirdStreamFileExists(const char* key) {
3800 int third_stream_file_index =
3801 disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
3802 base::FilePath third_stream_file_path = cache_path_.AppendASCII(
3803 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
3804 key, third_stream_file_index));
3805 return PathExists(third_stream_file_path);
3808 void DiskCacheEntryTest::SyncDoomEntry(const char* key) {
3809 net::TestCompletionCallback callback;
3810 cache_->DoomEntry(key, callback.callback());
3811 callback.WaitForResult();
3814 // Check that a newly-created entry with no third-stream writes omits the
3815 // third stream file.
3816 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream1) {
3817 SetSimpleCacheMode();
3818 InitCache();
3820 const char key[] = "key";
3822 disk_cache::Entry* entry;
3824 // Create entry and close without writing: third stream file should be
3825 // omitted, since the stream is empty.
3826 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3827 entry->Close();
3828 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3830 SyncDoomEntry(key);
3831 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3834 // Check that a newly-created entry with only a single zero-offset, zero-length
3835 // write omits the third stream file.
3836 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream2) {
3837 SetSimpleCacheMode();
3838 InitCache();
3840 const int kHalfSize = 8;
3841 const int kSize = kHalfSize * 2;
3842 const char key[] = "key";
3843 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3844 CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3846 disk_cache::Entry* entry;
3848 // Create entry, write empty buffer to third stream, and close: third stream
3849 // should still be omitted, since the entry ignores writes that don't modify
3850 // data or change the length.
3851 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3852 EXPECT_EQ(0, WriteData(entry, 2, 0, buffer.get(), 0, true));
3853 entry->Close();
3854 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3856 SyncDoomEntry(key);
3857 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3860 // Check that we can read back data written to the third stream.
3861 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream3) {
3862 SetSimpleCacheMode();
3863 InitCache();
3865 const int kHalfSize = 8;
3866 const int kSize = kHalfSize * 2;
3867 const char key[] = "key";
3868 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3869 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3870 CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
3872 disk_cache::Entry* entry;
3874 // Create entry, write data to third stream, and close: third stream should
3875 // not be omitted, since it contains data. Re-open entry and ensure there
3876 // are that many bytes in the third stream.
3877 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3878 EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
3879 entry->Close();
3880 EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3882 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3883 EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2.get(), kSize));
3884 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
3885 entry->Close();
3886 EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3888 SyncDoomEntry(key);
3889 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3892 // Check that we remove the third stream file upon opening an entry and finding
3893 // the third stream empty. (This is the upgrade path for entries written
3894 // before the third stream was optional.)
3895 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
3896 SetSimpleCacheMode();
3897 InitCache();
3899 const int kHalfSize = 8;
3900 const int kSize = kHalfSize * 2;
3901 const char key[] = "key";
3902 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3903 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3904 CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
3906 disk_cache::Entry* entry;
3908 // Create entry, write data to third stream, truncate third stream back to
3909 // empty, and close: third stream will not initially be omitted, since entry
3910 // creates the file when the first significant write comes in, and only
3911 // removes it on open if it is empty. Reopen, ensure that the file is
3912 // deleted, and that there's no data in the third stream.
3913 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3914 EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
3915 EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1.get(), 0, true));
3916 entry->Close();
3917 EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3919 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3920 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3921 EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2.get(), kSize));
3922 entry->Close();
3923 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3925 SyncDoomEntry(key);
3926 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3929 // Check that we don't accidentally create the third stream file once the entry
3930 // has been doomed.
3931 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
3932 SetSimpleCacheMode();
3933 InitCache();
3935 const int kHalfSize = 8;
3936 const int kSize = kHalfSize * 2;
3937 const char key[] = "key";
3938 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3939 CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3941 disk_cache::Entry* entry;
3943 // Create entry, doom entry, write data to third stream, and close: third
3944 // stream should not exist. (Note: We don't care if the write fails, just
3945 // that it doesn't cause the file to be created on disk.)
3946 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3947 entry->Doom();
3948 WriteData(entry, 2, 0, buffer.get(), kHalfSize, true);
3949 entry->Close();
3950 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3953 // There could be a race between Doom and an optimistic write.
3954 TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
3955 // Test sequence:
3956 // Create, first Write, second Write, Close.
3957 // Open, Close.
3958 SetSimpleCacheMode();
3959 InitCache();
3960 disk_cache::Entry* null = NULL;
3961 const char key[] = "the first key";
3963 const int kSize = 200;
3964 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3965 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3966 CacheTestFillBuffer(buffer1->data(), kSize, false);
3967 CacheTestFillBuffer(buffer2->data(), kSize, false);
3969 // The race only happens on stream 1 and stream 2.
3970 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3971 ASSERT_EQ(net::OK, DoomAllEntries());
3972 disk_cache::Entry* entry = NULL;
3974 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3975 EXPECT_NE(null, entry);
3976 entry->Close();
3977 entry = NULL;
3979 ASSERT_EQ(net::OK, DoomAllEntries());
3980 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3981 EXPECT_NE(null, entry);
3983 int offset = 0;
3984 int buf_len = kSize;
3985 // This write should not be optimistic (since create is).
3986 EXPECT_EQ(buf_len,
3987 WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3989 offset = kSize;
3990 // This write should be optimistic.
3991 EXPECT_EQ(buf_len,
3992 WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3993 entry->Close();
3995 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3996 EXPECT_NE(null, entry);
3998 entry->Close();
3999 entry = NULL;
// Regression test for crbug.com/317138, in which deleting an already-doomed
// entry removed the still-active entry from the backend's index.
TEST_F(DiskCacheEntryTest, SimpleCachePreserveActiveEntries) {
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* null = NULL;

  const char key[] = "this is a key";

  // |entry1| is created and immediately doomed, so |key| is free again.
  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);
  entry1->Doom();

  // |entry2| reuses the same key while the doomed |entry1| is still open.
  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);
  entry2_closer.reset();

  // Closing then reopening entry2 insures that entry2 is serialized, and so
  // it can be opened from files without error.
  entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  EXPECT_NE(null, entry2);
  entry2_closer.reset(entry2);

  // Keep a ref to |entry1| so we can observe when its teardown finishes.
  scoped_refptr<disk_cache::SimpleEntryImpl>
      entry1_refptr = static_cast<disk_cache::SimpleEntryImpl*>(entry1);

  // If crbug.com/317138 has regressed, this will remove |entry2| from
  // the backend's |active_entries_| while |entry2| is still alive and its
  // files are still on disk.
  entry1_closer.reset();
  entry1 = NULL;

  // Close does not have a callback. However, we need to be sure the close is
  // finished before we continue the test. We can take advantage of how the ref
  // counting of a SimpleEntryImpl works to fake out a callback: When the
  // last Close() call is made to an entry, an IO operation is sent to the
  // synchronous entry to close the platform files. This IO operation holds a
  // ref pointer to the entry, which expires when the operation is done. So,
  // we take a refpointer, and watch the SimpleEntry object until it has only
  // one ref; this indicates the IO operation is complete.
  while (!entry1_refptr->HasOneRef()) {
    base::PlatformThread::YieldCurrentThread();
    base::MessageLoop::current()->RunUntilIdle();
  }
  entry1_refptr = NULL;

  // In the bug case, this new entry ends up being a duplicate object pointing
  // at the same underlying files.
  disk_cache::Entry* entry3 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry3));
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);

  // The test passes if these two dooms do not crash.
  entry2->Doom();
  entry3->Doom();
}
// Runs the shared BasicSparseIO scenario against the Simple Cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  BasicSparseIO();
}
// Runs the shared HugeSparseIO scenario against the Simple Cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  HugeSparseIO();
}
// Runs the shared GetAvailableRange scenario against the Simple Cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
  SetSimpleCacheMode();
  InitCache();
  GetAvailableRange();
}
// Runs the shared UpdateSparseEntry scenario against the Simple Cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  UpdateSparseEntry();
}
// Runs the shared DoomSparseEntry scenario against the Simple Cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomSparseEntry();
}
// Runs the shared PartialSparseEntry scenario against the Simple Cache
// backend.
TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  PartialSparseEntry();
}
4103 TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
4104 const int kSize = 1024;
4106 SetSimpleCacheMode();
4107 // An entry is allowed sparse data 1/10 the size of the cache, so this size
4108 // allows for one |kSize|-sized range plus overhead, but not two ranges.
4109 SetMaxSize(kSize * 15);
4110 InitCache();
4112 const char key[] = "key";
4113 disk_cache::Entry* null = NULL;
4114 disk_cache::Entry* entry;
4115 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
4116 EXPECT_NE(null, entry);
4118 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
4119 CacheTestFillBuffer(buffer->data(), kSize, false);
4120 net::TestCompletionCallback callback;
4121 int ret;
4123 // Verify initial conditions.
4124 ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
4125 EXPECT_EQ(0, callback.GetResult(ret));
4127 ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
4128 EXPECT_EQ(0, callback.GetResult(ret));
4130 // Write a range and make sure it reads back.
4131 ret = entry->WriteSparseData(0, buffer.get(), kSize, callback.callback());
4132 EXPECT_EQ(kSize, callback.GetResult(ret));
4134 ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
4135 EXPECT_EQ(kSize, callback.GetResult(ret));
4137 // Write another range and make sure it reads back.
4138 ret = entry->WriteSparseData(kSize, buffer.get(), kSize, callback.callback());
4139 EXPECT_EQ(kSize, callback.GetResult(ret));
4141 ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
4142 EXPECT_EQ(kSize, callback.GetResult(ret));
4144 // Make sure the first range was removed when the second was written.
4145 ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
4146 EXPECT_EQ(0, callback.GetResult(ret));
4148 entry->Close();