// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/platform_thread.h"
#include "net/base/completion_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/memory/mem_entry_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "testing/gtest/include/gtest/gtest.h"

using base::Time;
using disk_cache::ScopedEntryPtr;
// Tests that can run with different types of caches.
class DiskCacheEntryTest : public DiskCacheTestWithCache {
 public:
  void InternalSyncIOBackground(disk_cache::Entry* entry);
  void ExternalSyncIOBackground(disk_cache::Entry* entry);

 protected:
  void InternalSyncIO();
  void InternalAsyncIO();
  void ExternalSyncIO();
  void ExternalAsyncIO();
  void ReleaseBuffer(int stream_index);
  void StreamAccess();
  void GetKey();
  void GetTimes(int stream_index);
  void GrowData(int stream_index);
  void TruncateData(int stream_index);
  void ZeroLengthIO(int stream_index);
  void Buffering();
  void SizeAtCreate();
  void SizeChanges(int stream_index);
  void ReuseEntry(int size, int stream_index);
  void InvalidData(int stream_index);
  void ReadWriteDestroyBuffer(int stream_index);
  void DoomNormalEntry();
  void DoomEntryNextToOpenEntry();
  void DoomedEntry(int stream_index);
  void BasicSparseIO();
  void HugeSparseIO();
  void GetAvailableRange();
  void CouldBeSparse();
  void UpdateSparseEntry();
  void DoomSparseEntry();
  void PartialSparseEntry();
  bool SimpleCacheMakeBadChecksumEntry(const std::string& key, int* data_size);
  bool SimpleCacheThirdStreamFileExists(const char* key);
  void SyncDoomEntry(const char* key);
};
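// Note: the disk_cache::Entry calls exercised below take arguments in the
// order (stream_index, offset, buffer, length, callback) for ReadData(), with
// an extra trailing |truncate| flag for WriteData(); both return the number of
// bytes transferred, or a net error code such as net::ERR_IO_PENDING.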
// This part of the test runs on the background thread.
void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) {
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  EXPECT_EQ(
      0,
      entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(10,
            entry->WriteData(
                0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(
      10,
      entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  memset(buffer3->data(), 0, kSize3);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  EXPECT_EQ(
      5000,
      entry->WriteData(
          1, 1500, buffer2.get(), kSize2, net::CompletionCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  EXPECT_EQ(4989,
            entry->ReadData(
                1, 1511, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(
      5000,
      entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  EXPECT_EQ(1500,
            entry->ReadData(
                1, 5000, buffer2.get(), kSize2, net::CompletionCallback()));

  EXPECT_EQ(0,
            entry->ReadData(
                1, 6500, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_EQ(
      6500,
      entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
  EXPECT_EQ(8192,
            entry->WriteData(
                1, 0, buffer3.get(), 8192, net::CompletionCallback(), false));
  EXPECT_EQ(
      8192,
      entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
  EXPECT_EQ(8192, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(
      0, 0, NULL, 0, net::CompletionCallback(), true));
  EXPECT_EQ(0, entry->WriteData(
      1, 0, NULL, 0, net::CompletionCallback(), true));
}
// We need to support synchronous IO even though it is not a supported
// operation from the point of view of the disk cache's public interface,
// because we use it internally, not just by a few tests, but as part of the
// implementation (see sparse_control.cc, for example).
void DiskCacheEntryTest::InternalSyncIO() {
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  // The bulk of the test runs from within the callback, on the cache thread.
  RunTaskForTest(base::Bind(&DiskCacheEntryTest::InternalSyncIOBackground,
                            base::Unretained(this),
                            entry));

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
TEST_F(DiskCacheEntryTest, InternalSyncIO) {
  InitCache();
  InternalSyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalSyncIO();
}
void DiskCacheEntryTest::InternalAsyncIO() {
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  // Avoid using internal buffers for the test. We have to write something to
  // the entry and close it so that we flush the internal buffer to disk. After
  // that, IO operations will be really hitting the disk. We don't care about
  // the content, so just extending the entry is enough (all extensions zero-
  // fill any holes).
  EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, NULL, 0, false));
  EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);
  CallbackTest callback10(&helper, false);
  CallbackTest callback11(&helper, false);
  CallbackTest callback12(&helper, false);
  CallbackTest callback13(&helper, false);

  const int kSize1 = 10;
  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);
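  // Each operation below either completes synchronously (returning the byte
  // count directly) or returns net::ERR_IO_PENDING; in the pending case
  // |expected| is incremented so that WaitUntilCacheIoFinished() knows how
  // many callback invocations to wait for.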
  EXPECT_EQ(0,
            entry->ReadData(
                0,
                15 * 1024,
                buffer1.get(),
                kSize1,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback1))));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  int expected = 0;
  int ret = entry->WriteData(
      0,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
      false);
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer2->data(), 0, kSize2);
  ret = entry->ReadData(
      0,
      0,
      buffer2.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback3)));
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry->WriteData(
      1,
      1500,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
      true);
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  ret = entry->ReadData(
      1,
      1511,
      buffer3.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
  EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry->ReadData(
      1,
      0,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  memset(buffer3->data(), 0, kSize3);

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  ret = entry->ReadData(
      1,
      5000,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback7)));
  EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(
      1,
      0,
      buffer3.get(),
      kSize3,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback9)));
  EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->WriteData(
      1,
      0,
      buffer3.get(),
      8192,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback10)),
      true);
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  ret = entry->ReadData(
      1,
      0,
      buffer3.get(),
      kSize3,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback11)));
  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_EQ(8192, entry->GetDataSize(1));

  ret = entry->ReadData(
      0,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback12)));
  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  ret = entry->ReadData(
      1,
      0,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback13)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
  InitCache();
  InternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalAsyncIO();
}
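// The "External" variants below use buffers that are too large to fit in the
// blockfile cache's internal block files, so the data is expected to land in
// separate (external) files on disk.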
// This part of the test runs on the background thread.
void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) {
  const int kSize1 = 17000;
  const int kSize2 = 25000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(17000,
            entry->WriteData(
                0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(
      17000,
      entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  EXPECT_EQ(
      25000,
      entry->WriteData(
          1, 10000, buffer2.get(), kSize2, net::CompletionCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  EXPECT_EQ(24989,
            entry->ReadData(
                1, 10011, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(
      25000,
      entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_EQ(5000,
            entry->ReadData(
                1, 30000, buffer2.get(), kSize2, net::CompletionCallback()));

  EXPECT_EQ(0,
            entry->ReadData(
                1, 35000, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_EQ(
      17000,
      entry->ReadData(1, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  EXPECT_EQ(
      17000,
      entry->WriteData(
          1, 20000, buffer1.get(), kSize1, net::CompletionCallback(), false));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(
      0, 0, NULL, 0, net::CompletionCallback(), true));
  EXPECT_EQ(0, entry->WriteData(
      1, 0, NULL, 0, net::CompletionCallback(), true));
}
void DiskCacheEntryTest::ExternalSyncIO() {
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));

  // The bulk of the test runs from within the callback, on the cache thread.
  RunTaskForTest(base::Bind(&DiskCacheEntryTest::ExternalSyncIOBackground,
                            base::Unretained(this),
                            entry));

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
  InitCache();
  ExternalSyncIO();
}

TEST_F(DiskCacheEntryTest, ExternalSyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalSyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalSyncIO();
}
void DiskCacheEntryTest::ExternalAsyncIO() {
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));

  int expected = 0;

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);

  const int kSize1 = 17000;
  const int kSize2 = 25000;
  const int kSize3 = 25000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  int ret = entry->WriteData(
      0,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback1)),
      false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer2->data(), 0, kSize1);
  ret = entry->ReadData(
      0,
      0,
      buffer2.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  ret = entry->WriteData(
      1,
      10000,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback3)),
      false);
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer3->data(), 0, kSize3);
  ret = entry->ReadData(
      1,
      10011,
      buffer3.get(),
      kSize3,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback4)));
  EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry->ReadData(
      1,
      0,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));
  ret = entry->ReadData(
      1,
      30000,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_EQ(0,
            entry->ReadData(
                1,
                35000,
                buffer2.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback7))));
  ret = entry->ReadData(
      1,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback8)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  ret = entry->WriteData(
      1,
      20000,
      buffer3.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback9)),
      false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
  InitCache();
  ExternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, ExternalAsyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalAsyncIO();
}
// Tests that IOBuffers are not referenced after IO completes.
void DiskCacheEntryTest::ReleaseBuffer(int stream_index) {
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(buffer->data(), kBufferSize, false);

  net::ReleaseBufferCompletionCallback cb(buffer.get());
  int rv = entry->WriteData(
      stream_index, 0, buffer.get(), kBufferSize, cb.callback(), false);
  EXPECT_EQ(kBufferSize, cb.GetResult(rv));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, ReleaseBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ReleaseBuffer(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReleaseBuffer) {
  SetMemoryOnlyMode();
  InitCache();
  ReleaseBuffer(0);
}
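// Each entry exposes kNumStreams (three) independent data streams; using a
// stream index outside that range is expected to fail with
// net::ERR_INVALID_ARGUMENT, as verified below.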
void DiskCacheEntryTest::StreamAccess() {
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);

  const int kBufferSize = 1024;
  const int kNumStreams = 3;
  scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
  for (int i = 0; i < kNumStreams; i++) {
    reference_buffers[i] = new net::IOBuffer(kBufferSize);
    CacheTestFillBuffer(reference_buffers[i]->data(), kBufferSize, false);
  }
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kBufferSize));
  for (int i = 0; i < kNumStreams; i++) {
    EXPECT_EQ(
        kBufferSize,
        WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false));
    memset(buffer1->data(), 0, kBufferSize);
    EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
    EXPECT_EQ(
        0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
  }
  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
            ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
  entry->Close();

  // Open the entry and read it in chunks, including a read past the end.
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
  ASSERT_TRUE(NULL != entry);
  const int kReadBufferSize = 600;
  const int kFinalReadSize = kBufferSize - kReadBufferSize;
  COMPILE_ASSERT(kFinalReadSize < kReadBufferSize, should_be_exactly_two_reads);
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize));
  for (int i = 0; i < kNumStreams; i++) {
    memset(buffer2->data(), 0, kReadBufferSize);
    EXPECT_EQ(kReadBufferSize,
              ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
    EXPECT_EQ(
        0,
        memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));

    memset(buffer2->data(), 0, kReadBufferSize);
    EXPECT_EQ(
        kFinalReadSize,
        ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize));
    EXPECT_EQ(0,
              memcmp(reference_buffers[i]->data() + kReadBufferSize,
                     buffer2->data(),
                     kFinalReadSize));
  }

  entry->Close();
}

TEST_F(DiskCacheEntryTest, StreamAccess) {
  InitCache();
  StreamAccess();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
  SetMemoryOnlyMode();
  InitCache();
  StreamAccess();
}
void DiskCacheEntryTest::GetKey() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(key, entry->GetKey()) << "short key";
  entry->Close();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);
  char key_buffer[20000];

  CacheTestFillBuffer(key_buffer, 3000, true);
  key_buffer[1000] = '\0';

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
  entry->Close();

  key_buffer[1000] = 'p';
  key_buffer[3000] = '\0';
  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
  key_buffer[19999] = '\0';

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "long key";
  entry->Close();

  CacheTestFillBuffer(key_buffer, 0x4000, true);
  key_buffer[0x4000] = '\0';

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
  entry->Close();
}

TEST_F(DiskCacheEntryTest, GetKey) {
  InitCache();
  GetKey();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
  SetMemoryOnlyMode();
  InitCache();
  GetKey();
}
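// As asserted below, APP_CACHE entries are not expected to update their
// modification or access times on normal reads and writes, and SHADER_CACHE
// entries are not expected to update them on reads.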
void DiskCacheEntryTest::GetTimes(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;

  Time t1 = Time::Now();
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(entry->GetLastModified() >= t1);
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t2 = Time::Now();
  EXPECT_TRUE(t2 > t1);
  EXPECT_EQ(0, WriteData(entry, stream_index, 200, NULL, 0, false));
  if (type_ == net::APP_CACHE) {
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else {
    EXPECT_TRUE(entry->GetLastModified() >= t2);
  }
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  AddDelay();
  Time t3 = Time::Now();
  EXPECT_TRUE(t3 > t2);
  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 0, buffer.get(), kSize));
  if (type_ == net::APP_CACHE) {
    EXPECT_TRUE(entry->GetLastUsed() < t2);
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else if (type_ == net::SHADER_CACHE) {
    EXPECT_TRUE(entry->GetLastUsed() < t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  } else {
    EXPECT_TRUE(entry->GetLastUsed() >= t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  }
  entry->Close();
}

TEST_F(DiskCacheEntryTest, GetTimes) {
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGetTimes) {
  SetMemoryOnlyMode();
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, AppCacheGetTimes) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, ShaderCacheGetTimes) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  GetTimes(0);
}
void DiskCacheEntryTest::GrowData(int stream_index) {
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  memset(buffer2->data(), 0, kSize);

  base::strlcpy(buffer1->data(), "the data", kSize);
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, ReadData(entry, stream_index, 0, buffer2.get(), 10));
  EXPECT_STREQ("the data", buffer2->data());
  EXPECT_EQ(10, entry->GetDataSize(stream_index));

  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));

  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  memset(buffer2->data(), 0, kSize);
  std::string key2("Second key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, entry->GetDataSize(stream_index));
  entry->Close();

  // Go from an internal address to a bigger block size.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
  entry->Close();
  memset(buffer2->data(), 0, kSize);

  // Go from an internal address to an external one.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Double check the size from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));

  // Now extend the entry without actual data.
  EXPECT_EQ(0, WriteData(entry, stream_index, 45500, buffer1.get(), 0, false));
  entry->Close();

  // And check again from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(45500, entry->GetDataSize(stream_index));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, GrowData) {
  InitCache();
  GrowData(0);
}

TEST_F(DiskCacheEntryTest, GrowDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  GrowData(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
  SetMemoryOnlyMode();
  InitCache();
  GrowData(0);
}
void DiskCacheEntryTest::TruncateData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple truncation:
  EXPECT_EQ(200, WriteData(entry, stream_index, 0, buffer1.get(), 200, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, true));
  EXPECT_EQ(100, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 50, buffer1.get(), 0, true));
  EXPECT_EQ(50, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Go to an external file.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), 20000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
  memset(buffer2->data(), 0, kSize2);

  // External file truncation
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, true));
  EXPECT_EQ(18000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 17500, buffer1.get(), 0, true));
  EXPECT_EQ(17500, entry->GetDataSize(stream_index));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer2.get(), 600));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
  EXPECT_EQ(1000, ReadData(entry, stream_index, 0, buffer2.get(), 1000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
      << "Preserves previous data";

  // Go from external file to zero length.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));

  entry->Close();
}

TEST_F(DiskCacheEntryTest, TruncateData) {
  InitCache();
  TruncateData(0);
}

TEST_F(DiskCacheEntryTest, TruncateDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  TruncateData(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
  SetMemoryOnlyMode();
  InitCache();
  TruncateData(0);
}
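// Zero-length reads and writes are valid operations; a zero-length write past
// the current end of the stream extends it, and the unwritten gap is expected
// to read back as zeros.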
void DiskCacheEntryTest::ZeroLengthIO(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  EXPECT_EQ(0, ReadData(entry, stream_index, 0, NULL, 0));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, NULL, 0, false));

  // This write should extend the entry.
  EXPECT_EQ(0, WriteData(entry, stream_index, 1000, NULL, 0, false));
  EXPECT_EQ(0, ReadData(entry, stream_index, 500, NULL, 0));
  EXPECT_EQ(0, ReadData(entry, stream_index, 2000, NULL, 0));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));

  EXPECT_EQ(0, WriteData(entry, stream_index, 100000, NULL, 0, true));
  EXPECT_EQ(0, ReadData(entry, stream_index, 50000, NULL, 0));
  EXPECT_EQ(100000, entry->GetDataSize(stream_index));

  // Let's verify the actual content.
  const int kSize = 20;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 500, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 5000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 50000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  entry->Close();
}

TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
  InitCache();
  ZeroLengthIO(0);
}

TEST_F(DiskCacheEntryTest, ZeroLengthIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ZeroLengthIO(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
  SetMemoryOnlyMode();
  InitCache();
  ZeroLengthIO(0);
}
// Tests that we handle the content correctly when buffering, a feature of the
// standard cache that permits fast responses to certain reads.
void DiskCacheEntryTest::Buffering() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  // Write a little more and read what we wrote before.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Now go to an external file.
  EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
  entry->Close();

  // Write something else and verify old data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Extend the file some more.
  EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
  entry->Close();

  // And now make sure that we can deal with data in both places (ram/disk).
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));

  // We should not overwrite the data at 18000 with this.
  EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  // Extend the file again and read before without closing the entry.
  EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  entry->Close();
}

TEST_F(DiskCacheEntryTest, Buffering) {
  InitCache();
  Buffering();
}

TEST_F(DiskCacheEntryTest, BufferingNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  Buffering();
}
// Checks that entries are zero length when created.
void DiskCacheEntryTest::SizeAtCreate() {
  const char key[] = "the first key";
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kNumStreams = 3;
  for (int i = 0; i < kNumStreams; ++i)
    EXPECT_EQ(0, entry->GetDataSize(i));
  entry->Close();
}

TEST_F(DiskCacheEntryTest, SizeAtCreate) {
  InitCache();
  SizeAtCreate();
}

TEST_F(DiskCacheEntryTest, MemoryOnlySizeAtCreate) {
  SetMemoryOnlyMode();
  InitCache();
  SizeAtCreate();
}
// Some extra tests to make sure that buffering works properly when changing
// the entry size.
void DiskCacheEntryTest::SizeChanges(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 17000, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 23000, buffer1.get(), kSize, true));
  entry->Close();

  // Extend the file and read between the old size and the new write.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(23000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));

  // Read at the end of the old file size.
  EXPECT_EQ(
      kSize,
      ReadData(entry, stream_index, 23000 + kSize - 35, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));

  // Read slightly before the last write.
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Extend the entry a little more.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 26000, buffer1.get(), kSize, true));
  EXPECT_EQ(26000 + kSize, entry->GetDataSize(stream_index));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 25900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(
      28,
      ReadData(entry, stream_index, 25000 + kSize - 28, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));

  // Reduce the size with a buffer that is not extending the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24000, buffer1.get(), kSize, false));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24500, buffer1.get(), kSize, true));
  EXPECT_EQ(24500 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 23900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size below the old size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 19000, buffer1.get(), kSize, true));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 18900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Verify that the actual file is truncated.
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));

  // Extend the newly opened file with a zero length write, expect zero fill.
  EXPECT_EQ(
      0,
      WriteData(entry, stream_index, 20000 + kSize, buffer1.get(), 0, false));
  EXPECT_EQ(kSize,
            ReadData(entry, stream_index, 19000 + kSize, buffer1.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));

  entry->Close();
}

TEST_F(DiskCacheEntryTest, SizeChanges) {
  InitCache();
  SizeChanges(1);
}

TEST_F(DiskCacheEntryTest, SizeChangesNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  SizeChanges(1);
}
// Write more than the total cache capacity but to a single entry. |size| is
// the number of bytes to write each time.
void DiskCacheEntryTest::ReuseEntry(int size, int stream_index) {
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));

  entry->Close();
  std::string key2("the second key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
  CacheTestFillBuffer(buffer->data(), size, false);

  for (int i = 0; i < 15; i++) {
    EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer.get(), 0, true));
    EXPECT_EQ(size,
              WriteData(entry, stream_index, 0, buffer.get(), size, false));
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  }

  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key1, &entry)) << "have not evicted this entry";
  entry->Close();
}

TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}
// Reading somewhere that was not written should return zeros.
void DiskCacheEntryTest::InvalidData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  const int kSize3 = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple data grow:
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 400, buffer1.get(), 200, false));
  EXPECT_EQ(600, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 300, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // The entry is now on disk. Load it and extend it.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 800, buffer1.get(), 200, false));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 700, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // This time using truncate.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 1800, buffer1.get(), 200, true));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 1500, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));

  // Go to an external file.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 19800, buffer1.get(), 200, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(4000, ReadData(entry, stream_index, 14000, buffer3.get(), 4000));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer3.get(), 600));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));

  // Extend it again.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 2000, buffer1.get(), 600, false));
  EXPECT_EQ(2600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 1800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  // And again (with truncation flag).
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 3000, buffer1.get(), 600, true));
  EXPECT_EQ(3600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 2800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  entry->Close();
}

TEST_F(DiskCacheEntryTest, InvalidData) {
  InitCache();
  InvalidData(0);
}

TEST_F(DiskCacheEntryTest, InvalidDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  InvalidData(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
  SetMemoryOnlyMode();
  InitCache();
  InvalidData(0);
}
// Tests that the cache preserves the buffer of an IO operation.
void DiskCacheEntryTest::ReadWriteDestroyBuffer(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  net::TestCompletionCallback cb;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(
                stream_index, 0, buffer.get(), kSize, cb.callback(), false));

  // Release our reference to the buffer.
  buffer = NULL;
  EXPECT_EQ(kSize, cb.WaitForResult());

  // And now test with a Read().
  buffer = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->ReadData(stream_index, 0, buffer.get(), kSize, cb.callback()));
  buffer = NULL;
  EXPECT_EQ(kSize, cb.WaitForResult());

  entry->Close();
}

TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
  InitCache();
  ReadWriteDestroyBuffer(0);
}
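// Doom() marks an entry for deletion; the entry stays usable through any open
// handles and is expected to be gone from the cache once the last handle is
// closed.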
void DiskCacheEntryTest::DoomNormalEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Doom();
  entry->Close();

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, true);
  buffer->data()[19999] = '\0';

  key = buffer->data();
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  entry->Doom();
  entry->Close();

  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheEntryTest, DoomEntry) {
  InitCache();
  DoomNormalEntry();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomNormalEntry();
}
// Tests dooming an entry that's linked to an open entry.
void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
  disk_cache::Entry* entry1;
  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("fixed", &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry("foo", &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry("bar", &entry1));
  entry1->Close();

  ASSERT_EQ(net::OK, OpenEntry("foo", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("bar", &entry2));
  entry2->Doom();
  entry2->Close();

  ASSERT_EQ(net::OK, OpenEntry("foo", &entry2));
  entry2->Doom();
  entry2->Close();
  entry1->Close();

  ASSERT_EQ(net::OK, OpenEntry("fixed", &entry1));
  entry1->Close();
}

TEST_F(DiskCacheEntryTest, DoomEntryNextToOpenEntry) {
  InitCache();
  DoomEntryNextToOpenEntry();
}

TEST_F(DiskCacheEntryTest, NewEvictionDoomEntryNextToOpenEntry) {
  SetNewEviction();
  InitCache();
  DoomEntryNextToOpenEntry();
}

TEST_F(DiskCacheEntryTest, AppCacheDoomEntryNextToOpenEntry) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  DoomEntryNextToOpenEntry();
}
// Verify that basic operations work as expected with doomed entries.
void DiskCacheEntryTest::DoomedEntry(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Doom();

  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
  Time initial = Time::Now();
  AddDelay();

  const int kSize1 = 2000;
  const int kSize2 = 2000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
  EXPECT_EQ(key, entry->GetKey());
  EXPECT_TRUE(initial < entry->GetLastModified());
  EXPECT_TRUE(initial < entry->GetLastUsed());

  entry->Close();
}

TEST_F(DiskCacheEntryTest, DoomedEntry) {
  InitCache();
  DoomedEntry(0);
}

TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomedEntry(0);
}
// Tests that we discard entries if the data is missing.
TEST_F(DiskCacheEntryTest, MissingData) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Write to an external file.
  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  FlushQueueForTest();
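  // Assumption: 0x80000001 is the cache address of the first separate
  // (external) data file created by the blockfile backend, so deleting
  // GetFileName(address) removes the backing file for the stream written
  // above.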
  disk_cache::Addr address(0x80000001);
  base::FilePath name = cache_impl_->GetFileName(address);
  EXPECT_TRUE(base::DeleteFile(name, false));

  // Attempt to read the data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
            ReadData(entry, 0, 0, buffer.get(), kSize));
  entry->Close();

  // The entry should be gone.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
}
// Test that child entries in a memory cache backend are not visible from
// enumerations.
TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 4096;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* parent_entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &parent_entry));

  // Writes to the parent entry.
  EXPECT_EQ(kSize,
            parent_entry->WriteSparseData(
                0, buf.get(), kSize, net::CompletionCallback()));

  // This write creates a child entry and writes to it.
  EXPECT_EQ(kSize,
            parent_entry->WriteSparseData(
                8192, buf.get(), kSize, net::CompletionCallback()));

  parent_entry->Close();

  // Perform the enumerations.
  scoped_ptr<TestIterator> iter = CreateIterator();
  disk_cache::Entry* entry = NULL;
  int count = 0;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(entry != NULL);
    ++count;
    disk_cache::MemEntryImpl* mem_entry =
        reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
    EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
    mem_entry->Close();
  }
  EXPECT_EQ(1, count);
}
// Writes |buf_1| to offset and reads it back as |buf_2|.
void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
                    net::IOBuffer* buf_1, int size, net::IOBuffer* buf_2) {
  net::TestCompletionCallback cb;

  memset(buf_2->data(), 0, size);
  int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
  EXPECT_EQ(0, cb.GetResult(ret));

  ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
  EXPECT_EQ(size, cb.GetResult(ret));

  ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
  EXPECT_EQ(size, cb.GetResult(ret));

  EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
}

// Reads |size| bytes from |entry| at |offset| and verifies that they are the
// same as the content of the provided |buffer|.
void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
                           int size) {
  net::TestCompletionCallback cb;

  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(size));
  memset(buf_1->data(), 0, size);
  int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
  EXPECT_EQ(size, cb.GetResult(ret));
  EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
}
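// Sparse data is addressed with 64-bit byte offsets via ReadSparseData() and
// WriteSparseData(); the tests below exercise offsets at 0, 4 MB, and 32 GB.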
void DiskCacheEntryTest::BasicSparseIO() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());

  // Write at offset 0x400000 (4 MB).
  VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());

  // Write at offset 0x800000000 (32 GB).
  VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());

  entry->Close();

  // Check everything again.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
  entry->Close();
}

TEST_F(DiskCacheEntryTest, BasicSparseIO) {
  InitCache();
  BasicSparseIO();
}

TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseIO) {
  SetMemoryOnlyMode();
  InitCache();
  BasicSparseIO();
}
1704 void DiskCacheEntryTest::HugeSparseIO() {
1705 std::string key("the first key");
1706 disk_cache::Entry* entry;
1707 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1709 // Write 1.2 MB so that we cover multiple entries.
1710 const int kSize = 1200 * 1024;
1711 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1712 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1713 CacheTestFillBuffer(buf_1->data(), kSize, false);
1715 // Write at offset 0x20F0000 (33 MB - 64 KB).
1716 VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
1717 entry->Close();
1719 // Check it again.
1720 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1721 VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
1722 entry->Close();
1725 TEST_F(DiskCacheEntryTest, HugeSparseIO) {
1726 InitCache();
1727 HugeSparseIO();
1730 TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseIO) {
1731 SetMemoryOnlyMode();
1732 InitCache();
1733 HugeSparseIO();
1736 void DiskCacheEntryTest::GetAvailableRange() {
1737 std::string key("the first key");
1738 disk_cache::Entry* entry;
1739 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1741 const int kSize = 16 * 1024;
1742 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1743 CacheTestFillBuffer(buf->data(), kSize, false);
1745 // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
1746 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
1747 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));
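// Note that the two ranges are not contiguous: the first write covers
// [0x20F0000, 0x20F4000) and the second starts at 0x20F4400, leaving a
// 0x400-byte hole in between.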
1749 // We stop at the first empty block.
1750 int64 start;
1751 net::TestCompletionCallback cb;
1752 int rv = entry->GetAvailableRange(
1753 0x20F0000, kSize * 2, &start, cb.callback());
1754 EXPECT_EQ(kSize, cb.GetResult(rv));
1755 EXPECT_EQ(0x20F0000, start);
1757 start = 0;
1758 rv = entry->GetAvailableRange(0, kSize, &start, cb.callback());
1759 EXPECT_EQ(0, cb.GetResult(rv));
1760 rv = entry->GetAvailableRange(
1761 0x20F0000 - kSize, kSize, &start, cb.callback());
1762 EXPECT_EQ(0, cb.GetResult(rv));
1763 rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback());
1764 EXPECT_EQ(kSize, cb.GetResult(rv));
1765 EXPECT_EQ(0x20F0000, start);
1767 // We should be able to Read based on the results of GetAvailableRange.
1768 start = -1;
1769 rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback());
1770 EXPECT_EQ(0, cb.GetResult(rv));
1771 rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback());
1772 EXPECT_EQ(0, cb.GetResult(rv));
1774 start = 0;
1775 rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback());
1776 EXPECT_EQ(0x2000, cb.GetResult(rv));
1777 EXPECT_EQ(0x20F2000, start);
1778 EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize));
1780 // Make sure that we respect the |len| argument.
1781 start = 0;
1782 rv = entry->GetAvailableRange(
1783 0x20F0001 - kSize, kSize, &start, cb.callback());
1784 EXPECT_EQ(1, cb.GetResult(rv));
1785 EXPECT_EQ(0x20F0000, start);
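// The queried window is [0x20F0001 - kSize, 0x20F0001), so only its very last
// byte overlaps the data stored at 0x20F0000; exactly one byte is reported.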
1787 entry->Close();
1790 TEST_F(DiskCacheEntryTest, GetAvailableRange) {
1791 InitCache();
1792 GetAvailableRange();
1795 TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
1796 SetMemoryOnlyMode();
1797 InitCache();
1798 GetAvailableRange();
1801 void DiskCacheEntryTest::CouldBeSparse() {
1802 std::string key("the first key");
1803 disk_cache::Entry* entry;
1804 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1806 const int kSize = 16 * 1024;
1807 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1808 CacheTestFillBuffer(buf->data(), kSize, false);
1810 // Write at offset 0x20F0000 (33 MB - 64 KB).
1811 EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
1813 EXPECT_TRUE(entry->CouldBeSparse());
1814 entry->Close();
1816 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1817 EXPECT_TRUE(entry->CouldBeSparse());
1818 entry->Close();
1820 // Now verify a regular entry.
1821 key.assign("another key");
1822 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1823 EXPECT_FALSE(entry->CouldBeSparse());
1825 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
1826 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
1827 EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));
1829 EXPECT_FALSE(entry->CouldBeSparse());
1830 entry->Close();
1832 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1833 EXPECT_FALSE(entry->CouldBeSparse());
1834 entry->Close();
1837 TEST_F(DiskCacheEntryTest, CouldBeSparse) {
1838 InitCache();
1839 CouldBeSparse();
1842 TEST_F(DiskCacheEntryTest, MemoryCouldBeSparse) {
1843 SetMemoryOnlyMode();
1844 InitCache();
1845 CouldBeSparse();
1848 TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
1849 SetMemoryOnlyMode();
1850 InitCache();
1852 const int kSize = 8192;
1853 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1854 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1855 CacheTestFillBuffer(buf_1->data(), kSize, false);
1857 std::string key("the first key");
1858 disk_cache::Entry* entry;
1859 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1861 // This loop writes back-to-back 1 KB chunks starting at offsets 0 and 9000.
1862 for (int i = 0; i < kSize; i += 1024) {
1863 scoped_refptr<net::WrappedIOBuffer> buf_3(
1864 new net::WrappedIOBuffer(buf_1->data() + i));
1865 VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
1866 VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
1867 }
1869 // Make sure we have data written.
1870 VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
1871 VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);
1873 // This tests a large write that spans 3 entries from a misaligned offset.
1874 VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());
1876 entry->Close();
1879 TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
1880 SetMemoryOnlyMode();
1881 InitCache();
1883 const int kSize = 8192;
1884 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1885 CacheTestFillBuffer(buf->data(), kSize, false);
1887 disk_cache::Entry* entry;
1888 std::string key("the first key");
1889 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1891 // Writes in the middle of an entry.
1892 EXPECT_EQ(
1893 1024,
1894 entry->WriteSparseData(0, buf.get(), 1024, net::CompletionCallback()));
1895 EXPECT_EQ(
1896 1024,
1897 entry->WriteSparseData(5120, buf.get(), 1024, net::CompletionCallback()));
1898 EXPECT_EQ(1024,
1899 entry->WriteSparseData(
1900 10000, buf.get(), 1024, net::CompletionCallback()));
1902 // Writes in the middle of an entry and spans 2 child entries.
1903 EXPECT_EQ(8192,
1904 entry->WriteSparseData(
1905 50000, buf.get(), 8192, net::CompletionCallback()));
1907 int64 start;
1908 net::TestCompletionCallback cb;
1909 // Test that we stop at a discontinuous child at the second block.
1910 int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback());
1911 EXPECT_EQ(1024, cb.GetResult(rv));
1912 EXPECT_EQ(0, start);
1914 // Test that number of bytes is reported correctly when we start from the
1915 // middle of a filled region.
1916 rv = entry->GetAvailableRange(512, 10000, &start, cb.callback());
1917 EXPECT_EQ(512, cb.GetResult(rv));
1918 EXPECT_EQ(512, start);
1920 // Test that we found bytes in the child of next block.
1921 rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
1922 EXPECT_EQ(1024, cb.GetResult(rv));
1923 EXPECT_EQ(5120, start);
1925 // Test that the desired length is respected. It starts within a filled
1926 // region.
1927 rv = entry->GetAvailableRange(5500, 512, &start, cb.callback());
1928 EXPECT_EQ(512, cb.GetResult(rv));
1929 EXPECT_EQ(5500, start);
1931 // Test that the desired length is respected. It starts before a filled
1932 // region.
1933 rv = entry->GetAvailableRange(5000, 620, &start, cb.callback());
1934 EXPECT_EQ(500, cb.GetResult(rv));
1935 EXPECT_EQ(5120, start);
1937 // Test that multiple blocks are scanned.
1938 rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback());
1939 EXPECT_EQ(8192, cb.GetResult(rv));
1940 EXPECT_EQ(50000, start);
1942 entry->Close();
1945 void DiskCacheEntryTest::UpdateSparseEntry() {
1946 std::string key("the first key");
1947 disk_cache::Entry* entry1;
1948 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
1950 const int kSize = 2048;
1951 scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1952 scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1953 CacheTestFillBuffer(buf_1->data(), kSize, false);
1955 // Write at offset 0.
1956 VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
1957 entry1->Close();
1959 // Write at offset 2048.
1960 ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
1961 VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());
1963 disk_cache::Entry* entry2;
1964 ASSERT_EQ(net::OK, CreateEntry("the second key", &entry2));
1966 entry1->Close();
1967 entry2->Close();
1968 FlushQueueForTest();
1969 if (memory_only_ || simple_cache_mode_)
1970 EXPECT_EQ(2, cache_->GetEntryCount());
1971 else
1972 EXPECT_EQ(3, cache_->GetEntryCount());
1975 TEST_F(DiskCacheEntryTest, UpdateSparseEntry) {
1976 SetCacheType(net::MEDIA_CACHE);
1977 InitCache();
1978 UpdateSparseEntry();
1981 TEST_F(DiskCacheEntryTest, MemoryOnlyUpdateSparseEntry) {
1982 SetMemoryOnlyMode();
1983 SetCacheType(net::MEDIA_CACHE);
1984 InitCache();
1985 UpdateSparseEntry();
1988 void DiskCacheEntryTest::DoomSparseEntry() {
1989 std::string key1("the first key");
1990 std::string key2("the second key");
1991 disk_cache::Entry *entry1, *entry2;
1992 ASSERT_EQ(net::OK, CreateEntry(key1, &entry1));
1993 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
1995 const int kSize = 4 * 1024;
1996 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1997 CacheTestFillBuffer(buf->data(), kSize, false);
1999 int64 offset = 1024;
2000 // Write to a bunch of ranges.
2001 for (int i = 0; i < 12; i++) {
2002 EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
2003 // Keep the second map under the default size.
2004 if (i < 9)
2005 EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));
2007 offset *= 4;
2008 }
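// The blockfile backend stores sparse data in separate child entries (one per
// 1 MB region touched above: 8 for entry1, 5 for entry2), so the disk-backed
// count below includes the children, while the memory-only and simple
// backends count only the two parent entries.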
2010 if (memory_only_ || simple_cache_mode_)
2011 EXPECT_EQ(2, cache_->GetEntryCount());
2012 else
2013 EXPECT_EQ(15, cache_->GetEntryCount());
2015 // Doom the first entry while it's still open.
2016 entry1->Doom();
2017 entry1->Close();
2018 entry2->Close();
2020 // Doom the second entry after it's fully saved.
2021 EXPECT_EQ(net::OK, DoomEntry(key2));
2023 // Make sure we do all needed work. This may fail for entry2 if between Close
2024 // and DoomEntry the system decides to remove all traces of the file from the
2025 // system cache, so we don't see that there is pending IO.
2026 base::MessageLoop::current()->RunUntilIdle();
2028 if (memory_only_) {
2029 EXPECT_EQ(0, cache_->GetEntryCount());
2030 } else {
2031 if (5 == cache_->GetEntryCount()) {
2032 // Most likely we are waiting for the result of reading the sparse info
2033 // (it's always async on Posix so it is easy to miss). Unfortunately we
2034 // don't have any signal to watch for so we can only wait.
2035 base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
2036 base::MessageLoop::current()->RunUntilIdle();
2037 }
2038 EXPECT_EQ(0, cache_->GetEntryCount());
2039 }
2040 }
2042 TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
2043 UseCurrentThread();
2044 InitCache();
2045 DoomSparseEntry();
2048 TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
2049 SetMemoryOnlyMode();
2050 InitCache();
2051 DoomSparseEntry();
2054 // A CompletionCallback wrapper that deletes the cache from within the callback.
2055 // The way a CompletionCallback works means that all tasks (even new ones)
2056 // are executed by the message loop before returning to the caller, so the only
2057 // way to simulate a race is to execute what we want on the callback.
2058 class SparseTestCompletionCallback: public net::TestCompletionCallback {
2059 public:
2060 explicit SparseTestCompletionCallback(scoped_ptr<disk_cache::Backend> cache)
2061 : cache_(cache.Pass()) {
2062 }
2064 private:
2065 void SetResult(int result) override {
2066 cache_.reset();
2067 TestCompletionCallback::SetResult(result);
2068 }
2070 scoped_ptr<disk_cache::Backend> cache_;
2071 DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback);
2072 };
2074 // Tests that we don't crash when the backend is deleted while we are
2075 // deleting the sub-entries of a sparse entry.
2076 TEST_F(DiskCacheEntryTest, DoomSparseEntry2) {
2077 UseCurrentThread();
2078 InitCache();
2079 std::string key("the key");
2080 disk_cache::Entry* entry;
2081 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2083 const int kSize = 4 * 1024;
2084 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
2085 CacheTestFillBuffer(buf->data(), kSize, false);
2087 int64 offset = 1024;
2088 // Write to a bunch of ranges.
2089 for (int i = 0; i < 12; i++) {
2090 EXPECT_EQ(kSize,
2091 entry->WriteSparseData(
2092 offset, buf.get(), kSize, net::CompletionCallback()));
2093 offset *= 4;
2094 }
2095 EXPECT_EQ(9, cache_->GetEntryCount());
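// One parent plus eight children: the twelve 4 KB writes above touch eight
// distinct 1 MB-aligned regions.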
2097 entry->Close();
2098 disk_cache::Backend* cache = cache_.get();
2099 SparseTestCompletionCallback cb(cache_.Pass());
2100 int rv = cache->DoomEntry(key, cb.callback());
2101 EXPECT_EQ(net::ERR_IO_PENDING, rv);
2102 EXPECT_EQ(net::OK, cb.WaitForResult());
2105 void DiskCacheEntryTest::PartialSparseEntry() {
2106 std::string key("the first key");
2107 disk_cache::Entry* entry;
2108 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2110 // We should be able to deal with IO that is not aligned to the block size
2111 // of a sparse entry, at least to write a big range without leaving holes.
2112 const int kSize = 4 * 1024;
2113 const int kSmallSize = 128;
2114 scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
2115 CacheTestFillBuffer(buf1->data(), kSize, false);
2117 // The first write is just to extend the entry. The third write occupies
2118 // a 1KB block partially, it may not be written internally depending on the
2119 // implementation.
2120 EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
2121 EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
2122 EXPECT_EQ(kSmallSize,
2123 WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
2124 entry->Close();
2125 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2127 scoped_refptr<net::IOBuffer> buf2(new net::IOBuffer(kSize));
2128 memset(buf2->data(), 0, kSize);
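// The writes above covered [500, 4596), [20000, 24096) and a small block at
// 1080321.  As the expectations below show, a sparse read only returns bytes
// that are contiguous from the requested offset: reads starting inside a hole
// (8000, 0, 99) return 0, a 4 KB read at offset 4096 finds only the 500
// trailing bytes of the first range, and a read at 24000 finds the 96
// trailing bytes of the second.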
2129 EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));
2131 EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
2132 EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
2133 EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));
2135 // This read should not change anything.
2136 EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
2137 EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
2138 EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));
2140 int rv;
2141 int64 start;
2142 net::TestCompletionCallback cb;
2143 if (memory_only_ || simple_cache_mode_) {
2144 rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
2145 EXPECT_EQ(100, cb.GetResult(rv));
2146 EXPECT_EQ(500, start);
2147 } else {
2148 rv = entry->GetAvailableRange(0, 2048, &start, cb.callback());
2149 EXPECT_EQ(1024, cb.GetResult(rv));
2150 EXPECT_EQ(1024, start);
2151 }
2152 rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback());
2153 EXPECT_EQ(500, cb.GetResult(rv));
2154 EXPECT_EQ(kSize, start);
2155 rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
2156 EXPECT_EQ(3616, cb.GetResult(rv));
2157 EXPECT_EQ(20 * 1024, start);
2159 // 1. Query before a filled 1KB block.
2160 // 2. Query within a filled 1KB block.
2161 // 3. Query beyond a filled 1KB block.
2162 if (memory_only_ || simple_cache_mode_) {
2163 rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
2164 EXPECT_EQ(3496, cb.GetResult(rv));
2165 EXPECT_EQ(20000, start);
2166 } else {
2167 rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
2168 EXPECT_EQ(3016, cb.GetResult(rv));
2169 EXPECT_EQ(20480, start);
2170 }
2171 rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback());
2172 EXPECT_EQ(1523, cb.GetResult(rv));
2173 EXPECT_EQ(3073, start);
2174 rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback());
2175 EXPECT_EQ(0, cb.GetResult(rv));
2176 EXPECT_EQ(4600, start);
2178 // Now make another write and verify that there is no hole in between.
2179 EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
2180 rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
2181 EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv));
2182 EXPECT_EQ(1024, start);
2183 EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
2184 EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
2185 EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));
2187 entry->Close();
2190 TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
2191 InitCache();
2192 PartialSparseEntry();
2195 TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
2196 SetMemoryOnlyMode();
2197 InitCache();
2198 PartialSparseEntry();
2201 // Tests that corrupt sparse children are removed automatically.
2202 TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
2203 InitCache();
2204 std::string key("the first key");
2205 disk_cache::Entry* entry;
2206 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2208 const int kSize = 4 * 1024;
2209 scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
2210 CacheTestFillBuffer(buf1->data(), kSize, false);
2212 const int k1Meg = 1024 * 1024;
2213 EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
2214 EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
2215 EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
2216 entry->Close();
2217 EXPECT_EQ(4, cache_->GetEntryCount());
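// Four entries: the parent plus one child for each of the three 1 MB regions
// written above.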
2219 scoped_ptr<TestIterator> iter = CreateIterator();
2220 int count = 0;
2221 std::string child_key[2];
2222 while (iter->OpenNextEntry(&entry) == net::OK) {
2223 ASSERT_TRUE(entry != NULL);
2224 // Writing to an entry will alter the LRU list and invalidate the iterator.
2225 if (entry->GetKey() != key && count < 2)
2226 child_key[count++] = entry->GetKey();
2227 entry->Close();
2228 }
2229 for (int i = 0; i < 2; i++) {
2230 ASSERT_EQ(net::OK, OpenEntry(child_key[i], &entry));
2231 // Overwrite the header's magic and signature.
2232 EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
2233 entry->Close();
2234 }
2236 EXPECT_EQ(4, cache_->GetEntryCount());
2237 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2239 // Two children should be gone. One while reading and one while writing.
2240 EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
2241 EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
2242 EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
2244 // We never touched this one.
2245 EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
2246 entry->Close();
2248 // We re-created one of the corrupt children.
2249 EXPECT_EQ(3, cache_->GetEntryCount());
2252 TEST_F(DiskCacheEntryTest, CancelSparseIO) {
2253 UseCurrentThread();
2254 InitCache();
2255 std::string key("the first key");
2256 disk_cache::Entry* entry;
2257 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2259 const int kSize = 40 * 1024;
2260 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
2261 CacheTestFillBuffer(buf->data(), kSize, false);
2263 // This will open and write two "real" entries.
2264 net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
2265 int rv = entry->WriteSparseData(
2266 1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
2267 EXPECT_EQ(net::ERR_IO_PENDING, rv);
2269 int64 offset = 0;
2270 rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
2271 rv = cb5.GetResult(rv);
2272 if (!cb1.have_result()) {
2273 // We may or may not have finished writing to the entry. If we have not,
2274 // we cannot start another operation at this time.
2275 EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
2276 }
2278 // We cancel the pending operation, and register multiple notifications.
2279 entry->CancelSparseIO();
2280 EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb2.callback()));
2281 EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb3.callback()));
2282 entry->CancelSparseIO(); // Should be a no op at this point.
2283 EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb4.callback()));
2285 if (!cb1.have_result()) {
2286 EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
2287 entry->ReadSparseData(
2288 offset, buf.get(), kSize, net::CompletionCallback()));
2289 EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
2290 entry->WriteSparseData(
2291 offset, buf.get(), kSize, net::CompletionCallback()));
2292 }
2294 // Now see if we receive all notifications. Note that we should not be able
2295 // to write everything (unless the timing of the system is really weird).
2296 rv = cb1.WaitForResult();
2297 EXPECT_TRUE(rv == 4096 || rv == kSize);
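// Either the cancellation stopped the write at the first child boundary (only
// the 4096 bytes up to the 1 MB mark were written) or the whole 40 KB
// completed before the cancellation was observed.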
2298 EXPECT_EQ(net::OK, cb2.WaitForResult());
2299 EXPECT_EQ(net::OK, cb3.WaitForResult());
2300 EXPECT_EQ(net::OK, cb4.WaitForResult());
2302 rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
2303 EXPECT_EQ(0, cb5.GetResult(rv));
2304 entry->Close();
2307 // Tests that we perform sanity checks on an entry's key. Note that there are
2308 // other tests that exercise sanity checks by using saved corrupt files.
2309 TEST_F(DiskCacheEntryTest, KeySanityCheck) {
2310 UseCurrentThread();
2311 InitCache();
2312 std::string key("the first key");
2313 disk_cache::Entry* entry;
2314 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2316 disk_cache::EntryImpl* entry_impl =
2317 static_cast<disk_cache::EntryImpl*>(entry);
2318 disk_cache::EntryStore* store = entry_impl->entry()->Data();
2320 // We have reserved space for a short key (one block), let's say that the key
2321 // takes more than one block, and remove the NULLs after the actual key.
2322 store->key_len = 800;
2323 memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
2324 entry_impl->entry()->set_modified();
2325 entry->Close();
2327 // We have a corrupt entry. Now reload it. We should NOT read beyond the
2328 // allocated buffer here.
2329 ASSERT_NE(net::OK, OpenEntry(key, &entry));
2330 DisableIntegrityCheck();
2333 TEST_F(DiskCacheEntryTest, SimpleCacheInternalAsyncIO) {
2334 SetSimpleCacheMode();
2335 InitCache();
2336 InternalAsyncIO();
2339 TEST_F(DiskCacheEntryTest, SimpleCacheExternalAsyncIO) {
2340 SetSimpleCacheMode();
2341 InitCache();
2342 ExternalAsyncIO();
2345 TEST_F(DiskCacheEntryTest, SimpleCacheReleaseBuffer) {
2346 SetSimpleCacheMode();
2347 InitCache();
2348 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2349 EXPECT_EQ(net::OK, DoomAllEntries());
2350 ReleaseBuffer(i);
2354 TEST_F(DiskCacheEntryTest, SimpleCacheStreamAccess) {
2355 SetSimpleCacheMode();
2356 InitCache();
2357 StreamAccess();
2360 TEST_F(DiskCacheEntryTest, SimpleCacheGetKey) {
2361 SetSimpleCacheMode();
2362 InitCache();
2363 GetKey();
2366 TEST_F(DiskCacheEntryTest, SimpleCacheGetTimes) {
2367 SetSimpleCacheMode();
2368 InitCache();
2369 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2370 EXPECT_EQ(net::OK, DoomAllEntries());
2371 GetTimes(i);
2375 TEST_F(DiskCacheEntryTest, SimpleCacheGrowData) {
2376 SetSimpleCacheMode();
2377 InitCache();
2378 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2379 EXPECT_EQ(net::OK, DoomAllEntries());
2380 GrowData(i);
2384 TEST_F(DiskCacheEntryTest, SimpleCacheTruncateData) {
2385 SetSimpleCacheMode();
2386 InitCache();
2387 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2388 EXPECT_EQ(net::OK, DoomAllEntries());
2389 TruncateData(i);
2393 TEST_F(DiskCacheEntryTest, SimpleCacheZeroLengthIO) {
2394 SetSimpleCacheMode();
2395 InitCache();
2396 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2397 EXPECT_EQ(net::OK, DoomAllEntries());
2398 ZeroLengthIO(i);
2402 TEST_F(DiskCacheEntryTest, SimpleCacheSizeAtCreate) {
2403 SetSimpleCacheMode();
2404 InitCache();
2405 SizeAtCreate();
2408 TEST_F(DiskCacheEntryTest, SimpleCacheReuseExternalEntry) {
2409 SetSimpleCacheMode();
2410 SetMaxSize(200 * 1024);
2411 InitCache();
2412 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2413 EXPECT_EQ(net::OK, DoomAllEntries());
2414 ReuseEntry(20 * 1024, i);
2418 TEST_F(DiskCacheEntryTest, SimpleCacheReuseInternalEntry) {
2419 SetSimpleCacheMode();
2420 SetMaxSize(100 * 1024);
2421 InitCache();
2422 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2423 EXPECT_EQ(net::OK, DoomAllEntries());
2424 ReuseEntry(10 * 1024, i);
2428 TEST_F(DiskCacheEntryTest, SimpleCacheSizeChanges) {
2429 SetSimpleCacheMode();
2430 InitCache();
2431 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2432 EXPECT_EQ(net::OK, DoomAllEntries());
2433 SizeChanges(i);
2437 TEST_F(DiskCacheEntryTest, SimpleCacheInvalidData) {
2438 SetSimpleCacheMode();
2439 InitCache();
2440 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2441 EXPECT_EQ(net::OK, DoomAllEntries());
2442 InvalidData(i);
2446 TEST_F(DiskCacheEntryTest, SimpleCacheReadWriteDestroyBuffer) {
2447 // Proving that the test works well with optimistic operations enabled is
2448 // subtle; instead, run only in APP_CACHE mode to disable optimistic
2449 // operations. Stream 0 always uses optimistic operations, so the test is not
2450 // run on stream 0.
2451 SetCacheType(net::APP_CACHE);
2452 SetSimpleCacheMode();
2453 InitCache();
2454 for (int i = 1; i < disk_cache::kSimpleEntryStreamCount; ++i) {
2455 EXPECT_EQ(net::OK, DoomAllEntries());
2456 ReadWriteDestroyBuffer(i);
2460 TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntry) {
2461 SetSimpleCacheMode();
2462 InitCache();
2463 DoomNormalEntry();
2466 TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntryNextToOpenEntry) {
2467 SetSimpleCacheMode();
2468 InitCache();
2469 DoomEntryNextToOpenEntry();
2472 TEST_F(DiskCacheEntryTest, SimpleCacheDoomedEntry) {
2473 SetSimpleCacheMode();
2474 InitCache();
2475 // Stream 2 is excluded because the implementation does not support writing to
2476 // it on a doomed entry, if it was previously lazily omitted.
2477 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount - 1; ++i) {
2478 EXPECT_EQ(net::OK, DoomAllEntries());
2479 DoomedEntry(i);
2483 // Creates an entry with corrupted last byte in stream 0.
2484 // Requires SimpleCacheMode.
2485 bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const std::string& key,
2486 int* data_size) {
2487 disk_cache::Entry* entry = NULL;
2489 if (CreateEntry(key, &entry) != net::OK || !entry) {
2490 LOG(ERROR) << "Could not create entry";
2491 return false;
2492 }
2494 const char data[] = "this is very good data";
2495 const int kDataSize = arraysize(data);
2496 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
2497 base::strlcpy(buffer->data(), data, kDataSize);
2499 EXPECT_EQ(kDataSize, WriteData(entry, 1, 0, buffer.get(), kDataSize, false));
2500 entry->Close();
2501 entry = NULL;
2503 // Corrupt the last byte of the data.
2504 base::FilePath entry_file0_path = cache_path_.AppendASCII(
2505 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
2506 base::File entry_file0(entry_file0_path,
2507 base::File::FLAG_WRITE | base::File::FLAG_OPEN);
2508 if (!entry_file0.IsValid())
2509 return false;
2511 int64 file_offset =
2512 sizeof(disk_cache::SimpleFileHeader) + key.size() + kDataSize - 2;
2513 EXPECT_EQ(1, entry_file0.Write(file_offset, "X", 1));
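// The CRC stored for stream 1 still describes the original bytes, so any
// subsequent read of this stream is expected to fail with
// ERR_CACHE_CHECKSUM_MISMATCH (see the tests below that use this helper).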
2514 *data_size = kDataSize;
2515 return true;
2518 // Tests that the simple cache can detect entries that have bad data.
2519 TEST_F(DiskCacheEntryTest, SimpleCacheBadChecksum) {
2520 SetSimpleCacheMode();
2521 InitCache();
2523 const char key[] = "the first key";
2524 int size_unused;
2525 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));
2527 disk_cache::Entry* entry = NULL;
2529 // Open the entry.
2530 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2531 ScopedEntryPtr entry_closer(entry);
2533 const int kReadBufferSize = 200;
2534 EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
2535 scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
2536 EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
2537 ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
2540 // Tests that an entry that has had an IO error occur can still be Doomed().
2541 TEST_F(DiskCacheEntryTest, SimpleCacheErrorThenDoom) {
2542 SetSimpleCacheMode();
2543 InitCache();
2545 const char key[] = "the first key";
2546 int size_unused;
2547 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));
2549 disk_cache::Entry* entry = NULL;
2551 // Open the entry, forcing an IO error.
2552 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2553 ScopedEntryPtr entry_closer(entry);
2555 const int kReadBufferSize = 200;
2556 EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
2557 scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
2558 EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
2559 ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
2561 entry->Doom(); // Should not crash.
2564 bool TruncatePath(const base::FilePath& file_path, int64 length) {
2565 base::File file(file_path, base::File::FLAG_WRITE | base::File::FLAG_OPEN);
2566 if (!file.IsValid())
2567 return false;
2568 return file.SetLength(length);
2571 TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
2572 SetSimpleCacheMode();
2573 InitCache();
2575 const char key[] = "the first key";
2577 disk_cache::Entry* entry = NULL;
2578 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2579 disk_cache::Entry* null = NULL;
2580 EXPECT_NE(null, entry);
2581 entry->Close();
2582 entry = NULL;
2584 // Force the entry to flush to disk, so subsequent platform file operations
2585 // succeed.
2586 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2587 entry->Close();
2588 entry = NULL;
2590 // Truncate the file such that the length isn't sufficient to have an EOF
2591 // record.
2592 int kTruncationBytes = -implicit_cast<int>(sizeof(disk_cache::SimpleFileEOF));
2593 const base::FilePath entry_path = cache_path_.AppendASCII(
2594 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
2595 const int64 invalid_size =
2596 disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
2597 kTruncationBytes);
2598 EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
2599 EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
2600 DisableIntegrityCheck();
2603 TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsBasic) {
2604 // Test sequence:
2605 // Create, Write, Read, Close.
2606 SetCacheType(net::APP_CACHE); // APP_CACHE doesn't use optimistic operations.
2607 SetSimpleCacheMode();
2608 InitCache();
2609 disk_cache::Entry* const null_entry = NULL;
2611 disk_cache::Entry* entry = NULL;
2612 EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
2613 ASSERT_NE(null_entry, entry);
2614 ScopedEntryPtr entry_closer(entry);
2616 const int kBufferSize = 10;
2617 scoped_refptr<net::IOBufferWithSize> write_buffer(
2618 new net::IOBufferWithSize(kBufferSize));
2619 CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
2620 EXPECT_EQ(
2621 write_buffer->size(),
2622 WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));
2624 scoped_refptr<net::IOBufferWithSize> read_buffer(
2625 new net::IOBufferWithSize(kBufferSize));
2626 EXPECT_EQ(read_buffer->size(),
2627 ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
2630 TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsDontBlock) {
2631 // Test sequence:
2632 // Create, Write, Close.
2633 SetCacheType(net::APP_CACHE); // APP_CACHE doesn't use optimistic operations.
2634 SetSimpleCacheMode();
2635 InitCache();
2636 disk_cache::Entry* const null_entry = NULL;
2638 MessageLoopHelper helper;
2639 CallbackTest create_callback(&helper, false);
2641 int expected_callback_runs = 0;
2642 const int kBufferSize = 10;
2643 scoped_refptr<net::IOBufferWithSize> write_buffer(
2644 new net::IOBufferWithSize(kBufferSize));
2646 disk_cache::Entry* entry = NULL;
2647 EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
2648 ASSERT_NE(null_entry, entry);
2649 ScopedEntryPtr entry_closer(entry);
2651 CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
2652 CallbackTest write_callback(&helper, false);
2653 int ret = entry->WriteData(
2654 1,
2655 0,
2656 write_buffer.get(),
2657 write_buffer->size(),
2658 base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
2659 false);
2660 ASSERT_EQ(net::ERR_IO_PENDING, ret);
2661 helper.WaitUntilCacheIoFinished(++expected_callback_runs);
2664 TEST_F(DiskCacheEntryTest,
2665 SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting) {
2666 // Test sequence:
2667 // Create, Write, Read, Close.
2668 SetCacheType(net::APP_CACHE); // APP_CACHE doesn't use optimistic operations.
2669 SetSimpleCacheMode();
2670 InitCache();
2671 disk_cache::Entry* const null_entry = NULL;
2672 MessageLoopHelper helper;
2674 disk_cache::Entry* entry = NULL;
2675 // Note that |entry| is only set once CreateEntry() has completed, which is why
2676 // we have to wait (i.e. use the helper CreateEntry() function).
2677 EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
2678 ASSERT_NE(null_entry, entry);
2679 ScopedEntryPtr entry_closer(entry);
2681 const int kBufferSize = 10;
2682 scoped_refptr<net::IOBufferWithSize> write_buffer(
2683 new net::IOBufferWithSize(kBufferSize));
2684 CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
2685 CallbackTest write_callback(&helper, false);
2686 int ret = entry->WriteData(
2687 1,
2688 0,
2689 write_buffer.get(),
2690 write_buffer->size(),
2691 base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
2692 false);
2693 EXPECT_EQ(net::ERR_IO_PENDING, ret);
2694 int expected_callback_runs = 1;
2696 scoped_refptr<net::IOBufferWithSize> read_buffer(
2697 new net::IOBufferWithSize(kBufferSize));
2698 CallbackTest read_callback(&helper, false);
2699 ret = entry->ReadData(
2700 1,
2701 0,
2702 read_buffer.get(),
2703 read_buffer->size(),
2704 base::Bind(&CallbackTest::Run, base::Unretained(&read_callback)));
2705 EXPECT_EQ(net::ERR_IO_PENDING, ret);
2706 ++expected_callback_runs;
2708 helper.WaitUntilCacheIoFinished(expected_callback_runs);
2709 ASSERT_EQ(read_buffer->size(), write_buffer->size());
2710 EXPECT_EQ(
2711 0,
2712 memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
2715 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
2716 // Test sequence:
2717 // Create, Write, Read, Write, Read, Close.
2718 SetSimpleCacheMode();
2719 InitCache();
2720 disk_cache::Entry* null = NULL;
2721 const char key[] = "the first key";
2723 MessageLoopHelper helper;
2724 CallbackTest callback1(&helper, false);
2725 CallbackTest callback2(&helper, false);
2726 CallbackTest callback3(&helper, false);
2727 CallbackTest callback4(&helper, false);
2728 CallbackTest callback5(&helper, false);
2730 int expected = 0;
2731 const int kSize1 = 10;
2732 const int kSize2 = 20;
2733 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2734 scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
2735 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
2736 scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
2737 CacheTestFillBuffer(buffer1->data(), kSize1, false);
2738 CacheTestFillBuffer(buffer2->data(), kSize2, false);
2740 disk_cache::Entry* entry = NULL;
2741 // Create is optimistic, must return OK.
2742 ASSERT_EQ(net::OK,
2743 cache_->CreateEntry(key, &entry,
2744 base::Bind(&CallbackTest::Run,
2745 base::Unretained(&callback1))));
2746 EXPECT_NE(null, entry);
2747 ScopedEntryPtr entry_closer(entry);
2749 // This write may or may not be optimistic (it depends if the previous
2750 // optimistic create already finished by the time we call the write here).
2751 int ret = entry->WriteData(
2752 1,
2753 0,
2754 buffer1.get(),
2755 kSize1,
2756 base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
2757 false);
2758 EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
2759 if (net::ERR_IO_PENDING == ret)
2760 expected++;
2762 // This Read must not be optimistic, since we don't support that yet.
2763 EXPECT_EQ(net::ERR_IO_PENDING,
2764 entry->ReadData(
2765 1,
2766 0,
2767 buffer1_read.get(),
2768 kSize1,
2769 base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
2770 expected++;
2771 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
2772 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
2774 // At this point after waiting, the pending operations queue on the entry
2775 // should be empty, so the next Write operation must run as optimistic.
2776 EXPECT_EQ(kSize2,
2777 entry->WriteData(
2778 1,
2779 0,
2780 buffer2.get(),
2781 kSize2,
2782 base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
2783 false));
2785 // Let's do another read so we block until both the write and the read
2786 // operations finish, and we can then test for HasOneRef() below.
2787 EXPECT_EQ(net::ERR_IO_PENDING,
2788 entry->ReadData(
2789 1,
2790 0,
2791 buffer2_read.get(),
2792 kSize2,
2793 base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
2794 expected++;
2796 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
2797 EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));
2799 // Check that we are not leaking.
2800 EXPECT_NE(entry, null);
2801 EXPECT_TRUE(
2802 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2805 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
2806 // Test sequence:
2807 // Create, Open, Close, Close.
2808 SetSimpleCacheMode();
2809 InitCache();
2810 disk_cache::Entry* null = NULL;
2811 const char key[] = "the first key";
2813 MessageLoopHelper helper;
2814 CallbackTest callback1(&helper, false);
2815 CallbackTest callback2(&helper, false);
2817 disk_cache::Entry* entry = NULL;
2818 ASSERT_EQ(net::OK,
2819 cache_->CreateEntry(key, &entry,
2820 base::Bind(&CallbackTest::Run,
2821 base::Unretained(&callback1))));
2822 EXPECT_NE(null, entry);
2823 ScopedEntryPtr entry_closer(entry);
2825 disk_cache::Entry* entry2 = NULL;
2826 ASSERT_EQ(net::ERR_IO_PENDING,
2827 cache_->OpenEntry(key, &entry2,
2828 base::Bind(&CallbackTest::Run,
2829 base::Unretained(&callback2))));
2830 ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));
2832 EXPECT_NE(null, entry2);
2833 EXPECT_EQ(entry, entry2);
2835 // We have to call close twice, since we called create and open above.
2836 entry->Close();
2838 // Check that we are not leaking.
2839 EXPECT_TRUE(
2840 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2843 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
2844 // Test sequence:
2845 // Create, Close, Open, Close.
2846 SetSimpleCacheMode();
2847 InitCache();
2848 disk_cache::Entry* null = NULL;
2849 const char key[] = "the first key";
2851 disk_cache::Entry* entry = NULL;
2852 ASSERT_EQ(net::OK,
2853 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2854 EXPECT_NE(null, entry);
2855 entry->Close();
2857 net::TestCompletionCallback cb;
2858 disk_cache::Entry* entry2 = NULL;
2859 ASSERT_EQ(net::ERR_IO_PENDING,
2860 cache_->OpenEntry(key, &entry2, cb.callback()));
2861 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
2862 ScopedEntryPtr entry_closer(entry2);
2864 EXPECT_NE(null, entry2);
2865 EXPECT_EQ(entry, entry2);
2867 // Check that we are not leaking.
2868 EXPECT_TRUE(
2869 static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
2872 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
2873 // Test sequence:
2874 // Create, Close, Write, Open, Open, Close, Write, Read, Close.
2875 SetSimpleCacheMode();
2876 InitCache();
2877 disk_cache::Entry* null = NULL;
2878 const char key[] = "the first key";
2880 net::TestCompletionCallback cb;
2881 const int kSize1 = 10;
2882 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2883 CacheTestFillBuffer(buffer1->data(), kSize1, false);
2884 disk_cache::Entry* entry = NULL;
2886 ASSERT_EQ(net::OK,
2887 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2888 EXPECT_NE(null, entry);
2889 entry->Close();
2891 // Let's do a Write so we block until both the Close and the Write
2892 // operations finish. The Write must fail since we are writing to a closed entry.
2893 EXPECT_EQ(
2894 net::ERR_IO_PENDING,
2895 entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
2896 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));
2898 // Finish running the pending tasks so that we fully complete the close
2899 // operation and destroy the entry object.
2900 base::MessageLoop::current()->RunUntilIdle();
2902 // At this point the |entry| must have been destroyed, and called
2903 // RemoveSelfFromBackend().
2904 disk_cache::Entry* entry2 = NULL;
2905 ASSERT_EQ(net::ERR_IO_PENDING,
2906 cache_->OpenEntry(key, &entry2, cb.callback()));
2907 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
2908 EXPECT_NE(null, entry2);
2910 disk_cache::Entry* entry3 = NULL;
2911 ASSERT_EQ(net::ERR_IO_PENDING,
2912 cache_->OpenEntry(key, &entry3, cb.callback()));
2913 ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
2914 EXPECT_NE(null, entry3);
2915 EXPECT_EQ(entry2, entry3);
2916 entry3->Close();
2918 // The previous Close doesn't actually close the entry since we opened it
2919 // twice, so the next Write operation must succeed, and it must be able to
2920 // run optimistically, since there is no operation running on this
2921 // entry.
2922 EXPECT_EQ(kSize1,
2923 entry2->WriteData(
2924 1, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
2926 // Let's do another read so we block until both the write and the read
2927 // operations finish, and we can then test for HasOneRef() below.
2928 EXPECT_EQ(net::ERR_IO_PENDING,
2929 entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
2930 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
2932 // Check that we are not leaking.
2933 EXPECT_TRUE(
2934 static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
2935 entry2->Close();
2938 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic5) {
2939 // Test sequence:
2940 // Create, Doom, Write, Read, Close.
2941 SetSimpleCacheMode();
2942 InitCache();
2943 disk_cache::Entry* null = NULL;
2944 const char key[] = "the first key";
2946 net::TestCompletionCallback cb;
2947 const int kSize1 = 10;
2948 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2949 CacheTestFillBuffer(buffer1->data(), kSize1, false);
2950 disk_cache::Entry* entry = NULL;
2952 ASSERT_EQ(net::OK,
2953 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2954 EXPECT_NE(null, entry);
2955 ScopedEntryPtr entry_closer(entry);
2956 entry->Doom();
2958 EXPECT_EQ(
2959 net::ERR_IO_PENDING,
2960 entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
2961 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
2963 EXPECT_EQ(net::ERR_IO_PENDING,
2964 entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
2965 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
2967 // Check that we are not leaking.
2968 EXPECT_TRUE(
2969 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
2972 TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
2973 // Test sequence:
2974 // Create, Write, Doom, Doom, Read, Doom, Close.
2975 SetSimpleCacheMode();
2976 InitCache();
2977 disk_cache::Entry* null = NULL;
2978 const char key[] = "the first key";
2980 net::TestCompletionCallback cb;
2981 const int kSize1 = 10;
2982 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
2983 scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
2984 CacheTestFillBuffer(buffer1->data(), kSize1, false);
2985 disk_cache::Entry* entry = NULL;
2987 ASSERT_EQ(net::OK,
2988 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
2989 EXPECT_NE(null, entry);
2990 ScopedEntryPtr entry_closer(entry);
2992 EXPECT_EQ(
2993 net::ERR_IO_PENDING,
2994 entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
2995 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
2997 entry->Doom();
2998 entry->Doom();
3000 // This Read must not be optimistic, since we don't support that yet.
3001 EXPECT_EQ(net::ERR_IO_PENDING,
3002 entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
3003 EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
3004 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));
3006 entry->Doom();
3009 // Confirm that IO buffers are not referenced by the Simple Cache after a write
3010 // completes.
3011 TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReleases) {
3012 SetSimpleCacheMode();
3013 InitCache();
3015 const char key[] = "the first key";
3016 disk_cache::Entry* entry = NULL;
3018 // First, an optimistic create.
3019 ASSERT_EQ(net::OK,
3020 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3021 ASSERT_TRUE(entry);
3022 ScopedEntryPtr entry_closer(entry);
3024 const int kWriteSize = 512;
3025 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kWriteSize));
3026 EXPECT_TRUE(buffer1->HasOneRef());
3027 CacheTestFillBuffer(buffer1->data(), kWriteSize, false);
3029 // An optimistic write happens only when there is an empty queue of pending
3030 // operations. To ensure the queue is empty, we issue a write and wait until
3031 // it completes.
3032 EXPECT_EQ(kWriteSize,
3033 WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
3034 EXPECT_TRUE(buffer1->HasOneRef());
3036 // Finally, we should perform an optimistic write and confirm that all
3037 // references to the IO buffer have been released.
3038 EXPECT_EQ(
3039 kWriteSize,
3040 entry->WriteData(
3041 1, 0, buffer1.get(), kWriteSize, net::CompletionCallback(), false));
3042 EXPECT_TRUE(buffer1->HasOneRef());
3045 TEST_F(DiskCacheEntryTest, SimpleCacheCreateDoomRace) {
3046 // Test sequence:
3047 // Create, Doom, Write, Close, Check files are not on disk anymore.
3048 SetSimpleCacheMode();
3049 InitCache();
3050 disk_cache::Entry* null = NULL;
3051 const char key[] = "the first key";
3053 net::TestCompletionCallback cb;
3054 const int kSize1 = 10;
3055 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
3056 CacheTestFillBuffer(buffer1->data(), kSize1, false);
3057 disk_cache::Entry* entry = NULL;
3059 ASSERT_EQ(net::OK,
3060 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3061 EXPECT_NE(null, entry);
3063 EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomEntry(key, cb.callback()));
3064 EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
3066 EXPECT_EQ(
3067 kSize1,
3068 entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));
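// This write targets stream 0, which the simple cache handles optimistically
// (see SimpleCacheReadWriteDestroyBuffer above), so the call is expected to
// return the byte count immediately even though the entry was just doomed.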
3070 entry->Close();
3072 // Finish running the pending tasks so that we fully complete the close
3073 // operation and destroy the entry object.
3074 base::MessageLoop::current()->RunUntilIdle();
3076 for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
3077 base::FilePath entry_file_path = cache_path_.AppendASCII(
3078 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
3079 base::File::Info info;
3080 EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
3081 }
3082 }
3084 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateRace) {
3085 // This test runs as APP_CACHE to make operations more synchronous. Test
3086 // sequence:
3087 // Create, Doom, Create.
3088 SetCacheType(net::APP_CACHE);
3089 SetSimpleCacheMode();
3090 InitCache();
3091 disk_cache::Entry* null = NULL;
3092 const char key[] = "the first key";
3094 net::TestCompletionCallback create_callback;
3096 disk_cache::Entry* entry1 = NULL;
3097 ASSERT_EQ(net::OK,
3098 create_callback.GetResult(
3099 cache_->CreateEntry(key, &entry1, create_callback.callback())));
3100 ScopedEntryPtr entry1_closer(entry1);
3101 EXPECT_NE(null, entry1);
3103 net::TestCompletionCallback doom_callback;
3104 EXPECT_EQ(net::ERR_IO_PENDING,
3105 cache_->DoomEntry(key, doom_callback.callback()));
3107 disk_cache::Entry* entry2 = NULL;
3108 ASSERT_EQ(net::OK,
3109 create_callback.GetResult(
3110 cache_->CreateEntry(key, &entry2, create_callback.callback())));
3111 ScopedEntryPtr entry2_closer(entry2);
3112 EXPECT_EQ(net::OK, doom_callback.GetResult(net::ERR_IO_PENDING));
3115 TEST_F(DiskCacheEntryTest, SimpleCacheDoomDoom) {
3116 // Test sequence:
3117 // Create, Doom, Create, Doom (1st entry), Open.
3118 SetSimpleCacheMode();
3119 InitCache();
3120 disk_cache::Entry* null = NULL;
3122 const char key[] = "the first key";
3124 disk_cache::Entry* entry1 = NULL;
3125 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3126 ScopedEntryPtr entry1_closer(entry1);
3127 EXPECT_NE(null, entry1);
3129 EXPECT_EQ(net::OK, DoomEntry(key));
3131 disk_cache::Entry* entry2 = NULL;
3132 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3133 ScopedEntryPtr entry2_closer(entry2);
3134 EXPECT_NE(null, entry2);
3136 // Redundantly dooming entry1 should not delete entry2.
3137 disk_cache::SimpleEntryImpl* simple_entry1 =
3138 static_cast<disk_cache::SimpleEntryImpl*>(entry1);
3139 net::TestCompletionCallback cb;
3140 EXPECT_EQ(net::OK,
3141 cb.GetResult(simple_entry1->DoomEntry(cb.callback())));
3143 disk_cache::Entry* entry3 = NULL;
3144 ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
3145 ScopedEntryPtr entry3_closer(entry3);
3146 EXPECT_NE(null, entry3);
3149 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateDoom) {
3150 // Test sequence:
3151 // Create, Doom, Create, Doom.
3152 SetSimpleCacheMode();
3153 InitCache();
3155 disk_cache::Entry* null = NULL;
3157 const char key[] = "the first key";
3159 disk_cache::Entry* entry1 = NULL;
3160 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3161 ScopedEntryPtr entry1_closer(entry1);
3162 EXPECT_NE(null, entry1);
3164 entry1->Doom();
3166 disk_cache::Entry* entry2 = NULL;
3167 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3168 ScopedEntryPtr entry2_closer(entry2);
3169 EXPECT_NE(null, entry2);
3171 entry2->Doom();
3173 // This test passes if it doesn't crash.
3176 TEST_F(DiskCacheEntryTest, SimpleCacheDoomCloseCreateCloseOpen) {
3177 // Test sequence: Create, Doom, Close, Create, Close, Open.
3178 SetSimpleCacheMode();
3179 InitCache();
3181 disk_cache::Entry* null = NULL;
3183 const char key[] = "this is a key";
3185 disk_cache::Entry* entry1 = NULL;
3186 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3187 ScopedEntryPtr entry1_closer(entry1);
3188 EXPECT_NE(null, entry1);
3190 entry1->Doom();
3191 entry1_closer.reset();
3192 entry1 = NULL;
3194 disk_cache::Entry* entry2 = NULL;
3195 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3196 ScopedEntryPtr entry2_closer(entry2);
3197 EXPECT_NE(null, entry2);
3199 entry2_closer.reset();
3200 entry2 = NULL;
3202 disk_cache::Entry* entry3 = NULL;
3203 ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
3204 ScopedEntryPtr entry3_closer(entry3);
3205 EXPECT_NE(null, entry3);
3208 // Checks that an optimistic Create would fail later on a racing Open.
3209 TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticCreateFailsOnOpen) {
3210 SetSimpleCacheMode();
3211 InitCache();
3213 // Create a corrupt file in place of a future entry. Optimistic create should
3214 // initially succeed, but realize later that creation failed.
3215 const std::string key = "the key";
3216 net::TestCompletionCallback cb;
3217 disk_cache::Entry* entry = NULL;
3218 disk_cache::Entry* entry2 = NULL;
3220 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3221 key, cache_path_));
3222 EXPECT_EQ(net::OK, cache_->CreateEntry(key, &entry, cb.callback()));
3223 ASSERT_TRUE(entry);
3224 ScopedEntryPtr entry_closer(entry);
3225 ASSERT_NE(net::OK, OpenEntry(key, &entry2));
3227 // Check that we are not leaking.
3228 EXPECT_TRUE(
3229 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3231 DisableIntegrityCheck();
3234 // Tests that old entries are evicted while new entries remain in the index.
3235 // This test relies on non-mandatory properties of the Simple Cache backend:
3236 // LRU eviction, specific values of high-watermark and low-watermark, etc.
3237 // When changing the eviction algorithm, the test will have to be re-engineered.
3238 TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
3239 const int kMaxSize = 200 * 1024;
3240 const int kWriteSize = kMaxSize / 10;
3241 const int kNumExtraEntries = 12;
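// 1 + kNumExtraEntries writes of kWriteSize bytes (~260 KB) exceed kMaxSize
// (200 KB), so the backend has to evict at least the oldest entry.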
3242 SetSimpleCacheMode();
3243 SetMaxSize(kMaxSize);
3244 InitCache();
3246 std::string key1("the first key");
3247 disk_cache::Entry* entry;
3248 ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
3249 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kWriteSize));
3250 CacheTestFillBuffer(buffer->data(), kWriteSize, false);
3251 EXPECT_EQ(kWriteSize,
3252 WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
3253 entry->Close();
3254 AddDelay();
3256 std::string key2("the key prefix");
3257 for (int i = 0; i < kNumExtraEntries; i++) {
3258 if (i == kNumExtraEntries - 2) {
3259 // Create a distinct timestamp for the last two entries. These entries
3260 // will be checked for outliving the eviction.
3261 AddDelay();
3262 }
3263 ASSERT_EQ(net::OK, CreateEntry(key2 + base::StringPrintf("%d", i), &entry));
3264 ScopedEntryPtr entry_closer(entry);
3265 EXPECT_EQ(kWriteSize,
3266 WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
3267 }
3269 // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
3270 // the internal knowledge about |SimpleBackendImpl|.
3271 ASSERT_NE(net::OK, OpenEntry(key1, &entry))
3272 << "Should have evicted the old entry";
3273 for (int i = 0; i < 2; i++) {
3274 int entry_no = kNumExtraEntries - i - 1;
3275 // Generally there is no guarantee that at this point the background eviction
3276 // is finished. We are testing the positive case, i.e. when the eviction
3277 // never reaches this entry, which should be non-flaky.
3278 ASSERT_EQ(net::OK, OpenEntry(key2 + base::StringPrintf("%d", entry_no),
3279 &entry))
3280 << "Should not have evicted fresh entry " << entry_no;
3281 entry->Close();
3282 }
3283 }
3285 // Tests that if a read and a following in-flight truncate are both in progress
3286 // simultaneously, they both complete successfully. See
3287 // http://crbug.com/239223
3288 TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate) {
3289 SetSimpleCacheMode();
3290 InitCache();
3292 const char key[] = "the first key";
3294 const int kBufferSize = 1024;
3295 scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
3296 CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
3298 disk_cache::Entry* entry = NULL;
3299 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3301 EXPECT_EQ(kBufferSize,
3302 WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
3303 entry->Close();
3304 entry = NULL;
3306 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3307 ScopedEntryPtr entry_closer(entry);
3309 MessageLoopHelper helper;
3310 int expected = 0;
3312 // Make a short read.
3313 const int kReadBufferSize = 512;
3314 scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
3315 CallbackTest read_callback(&helper, false);
3316 EXPECT_EQ(net::ERR_IO_PENDING,
3317 entry->ReadData(1,
3318 0,
3319 read_buffer.get(),
3320 kReadBufferSize,
3321 base::Bind(&CallbackTest::Run,
3322 base::Unretained(&read_callback))));
3323 ++expected;
3325 // Truncate the entry to the length of that read.
3326 scoped_refptr<net::IOBuffer>
3327 truncate_buffer(new net::IOBuffer(kReadBufferSize));
3328 CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
3329 CallbackTest truncate_callback(&helper, false);
3330 EXPECT_EQ(net::ERR_IO_PENDING,
3331 entry->WriteData(1,
3332 0,
3333 truncate_buffer.get(),
3334 kReadBufferSize,
3335 base::Bind(&CallbackTest::Run,
3336 base::Unretained(&truncate_callback)),
3337 true));
3338 ++expected;
3340 // Wait for both the read and truncation to finish, and confirm that both
3341 // succeeded.
3342 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
3343 EXPECT_EQ(kReadBufferSize, read_callback.last_result());
3344 EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
3345 EXPECT_EQ(0,
3346 memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
3347 }
3349 // Tests that a write and a read dependent on it that are in flight
3350 // simultaneously can both complete successfully, without erroneous early
3351 // returns. See http://crbug.com/239223
3352 TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
3353 SetSimpleCacheMode();
3354 InitCache();
3356 const char key[] = "the first key";
3357 disk_cache::Entry* entry = NULL;
3358 ASSERT_EQ(net::OK,
3359 cache_->CreateEntry(key, &entry, net::CompletionCallback()));
3360 ScopedEntryPtr entry_closer(entry);
3362 const int kBufferSize = 1024;
3363 scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
3364 CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);
3366 MessageLoopHelper helper;
3367 int expected = 0;
3369 CallbackTest write_callback(&helper, false);
3370 EXPECT_EQ(net::ERR_IO_PENDING,
3371 entry->WriteData(1,
3372 0,
3373 write_buffer.get(),
3374 kBufferSize,
3375 base::Bind(&CallbackTest::Run,
3376 base::Unretained(&write_callback)),
3377 true));
3378 ++expected;
3380 scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
3381 CallbackTest read_callback(&helper, false);
3382 EXPECT_EQ(net::ERR_IO_PENDING,
3383 entry->ReadData(1,
3384 0,
3385 read_buffer.get(),
3386 kBufferSize,
3387 base::Bind(&CallbackTest::Run,
3388 base::Unretained(&read_callback))));
3389 ++expected;
3391 EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
3392 EXPECT_EQ(kBufferSize, write_callback.last_result());
3393 EXPECT_EQ(kBufferSize, read_callback.last_result());
3394 EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
3395 }
3397 TEST_F(DiskCacheEntryTest, SimpleCacheOpenCreateRaceWithNoIndex) {
3398 SetSimpleCacheMode();
3399 DisableSimpleCacheWaitForIndex();
3400 DisableIntegrityCheck();
3401 InitCache();
3403 // Assume the index is not initialized, which is likely, since we are blocking
3404 // the IO thread from executing the index finalization step.
3405 disk_cache::Entry* entry1;
3406 net::TestCompletionCallback cb1;
3407 disk_cache::Entry* entry2;
3408 net::TestCompletionCallback cb2;
3409 int rv1 = cache_->OpenEntry("key", &entry1, cb1.callback());
3410 int rv2 = cache_->CreateEntry("key", &entry2, cb2.callback());
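// Even though the index has not finished loading, the Open should fail (no
// entry with this key exists yet) while the Create should succeed, as the
// expectations below check.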
3412 EXPECT_EQ(net::ERR_FAILED, cb1.GetResult(rv1));
3413 ASSERT_EQ(net::OK, cb2.GetResult(rv2));
3414 entry2->Close();
3415 }
3417 // Checks that reading two entries simultaneously does not discard a CRC check.
3418 // TODO(pasko): make it work with Simple Cache.
3419 TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheMultipleReadersCheckCRC) {
3420 SetSimpleCacheMode();
3421 InitCache();
3423 const char key[] = "key";
3425 int size;
3426 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
3428 scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
3429 scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
3431 // Advance the first reader a little.
3432 disk_cache::Entry* entry = NULL;
3433 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3434 EXPECT_EQ(1, ReadData(entry, 0, 0, read_buffer1.get(), 1));
3436 // Make the second reader pass the point where the first one is, and close.
3437 disk_cache::Entry* entry2 = NULL;
3438 EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
3439 EXPECT_EQ(1, ReadData(entry2, 0, 0, read_buffer2.get(), 1));
3440 EXPECT_EQ(1, ReadData(entry2, 0, 1, read_buffer2.get(), 1));
3441 entry2->Close();
3443 // Reading the data to the end should produce an error.
3444 EXPECT_GT(0, ReadData(entry, 0, 1, read_buffer1.get(), size));
3445 entry->Close();
3446 DisableIntegrityCheck();
3447 }
3449 // Checks one more scenario of overlapped reading of a bad entry.
3450 // Differs from |SimpleCacheMultipleReadersCheckCRC| only in the order of the
3451 // last two reads.
3452 TEST_F(DiskCacheEntryTest, SimpleCacheMultipleReadersCheckCRC2) {
3453 SetSimpleCacheMode();
3454 InitCache();
3456 const char key[] = "key";
3457 int size;
3458 ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
3460 scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
3461 scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
3463 // Advance the first reader a little.
3464 disk_cache::Entry* entry = NULL;
3465 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3466 ScopedEntryPtr entry_closer(entry);
3467 EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));
3469 // Advance the 2nd reader by the same amount.
3470 disk_cache::Entry* entry2 = NULL;
3471 EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
3472 ScopedEntryPtr entry2_closer(entry2);
3473 EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));
3475 // Continue reading 1st.
3476 EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));
3478 // This read should fail as well because we have previous read failures.
3479 EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
3480 DisableIntegrityCheck();
3481 }
3483 // Test that if we sequentially read each subset of the data until all of it
3484 // is read, the CRC is calculated correctly and the reads are successful.
3485 TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRC) {
3486 // Test sequence:
3487 // Create, Write, Read (first half of data), Read (second half of data),
3488 // Close.
3489 SetSimpleCacheMode();
3490 InitCache();
3491 disk_cache::Entry* null = NULL;
3492 const char key[] = "the first key";
3494 const int kHalfSize = 200;
3495 const int kSize = 2 * kHalfSize;
3496 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3497 CacheTestFillBuffer(buffer1->data(), kSize, false);
3498 disk_cache::Entry* entry = NULL;
3500 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3501 EXPECT_NE(null, entry);
3503 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
3504 entry->Close();
3506 disk_cache::Entry* entry2 = NULL;
3507 ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
3508 EXPECT_EQ(entry, entry2);
3510 // Read the first half of the data.
3511 int offset = 0;
3512 int buf_len = kHalfSize;
3513 scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(buf_len));
3514 EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
3515 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));
3517 // Read the second half of the data.
3518 offset = buf_len;
3519 buf_len = kHalfSize;
3520 scoped_refptr<net::IOBuffer> buffer1_read2(new net::IOBuffer(buf_len));
3521 EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
3522 char* buffer1_data = buffer1->data() + offset;
3523 EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));
3525 // Check that we are not leaking.
3526 EXPECT_NE(entry, null);
3527 EXPECT_TRUE(
3528 static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3529 entry->Close();
3530 entry = NULL;
3531 }
3533 // Test that we can write the data out of sequence and still read it back
3534 // correctly. In this case the CRC will not be present.
3535 TEST_F(DiskCacheEntryTest, SimpleCacheNonSequentialWrite) {
3536 // Test sequence:
3537 // Create, Write (second half of data), Write (first half of data), Read,
3538 // Close.
3539 SetSimpleCacheMode();
3540 InitCache();
3541 disk_cache::Entry* null = NULL;
3542 const char key[] = "the first key";
3544 const int kHalfSize = 200;
3545 const int kSize = 2 * kHalfSize;
3546 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3547 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3548 CacheTestFillBuffer(buffer1->data(), kSize, false);
3549 char* buffer1_data = buffer1->data() + kHalfSize;
3550 memcpy(buffer2->data(), buffer1_data, kHalfSize);
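// buffer2 now holds the second half of buffer1's data; the loop below writes
// that half at offset kHalfSize first and the first half of buffer1 at
// offset 0 afterwards, so the reassembled stream should match buffer1 despite
// the out-of-order writes.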
3552 disk_cache::Entry* entry = NULL;
3553 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3554 entry->Close();
3555 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3556 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3557 EXPECT_NE(null, entry);
3559 int offset = kHalfSize;
3560 int buf_len = kHalfSize;
3562 EXPECT_EQ(buf_len,
3563 WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3564 offset = 0;
3565 buf_len = kHalfSize;
3566 EXPECT_EQ(buf_len,
3567 WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3568 entry->Close();
3570 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3572 scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
3573 EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
3574 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));
3575 // Check that we are not leaking.
3576 ASSERT_NE(entry, null);
3577 EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3578 entry->Close();
3579 }
3580 }
3582 // Test that changing stream1 size does not affect stream0 (stream0 and stream1
3583 // are stored in the same file in Simple Cache).
3584 TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
3585 SetSimpleCacheMode();
3586 InitCache();
3587 disk_cache::Entry* entry = NULL;
3588 const char key[] = "the key";
3589 const int kSize = 100;
3590 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3591 scoped_refptr<net::IOBuffer> buffer_read(new net::IOBuffer(kSize));
3592 CacheTestFillBuffer(buffer->data(), kSize, false);
3594 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3595 EXPECT_TRUE(entry);
3597 // Write something into stream0.
3598 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
3599 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
3600 EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
3601 entry->Close();
3603 // Extend stream1.
3604 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3605 int stream1_size = 100;
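// A zero-length write at offset |stream1_size| should extend stream 1 to that
// length without storing any data; GetDataSize(1) below verifies the new size.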
3606 EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false));
3607 EXPECT_EQ(stream1_size, entry->GetDataSize(1));
3608 entry->Close();
3610 // Check that stream0 data has not been modified and that the EOF record for
3611 // stream 0 contains a CRC.
3612 // The entry needs to be reopened before checking the crc: Open will perform
3613 // the synchronization with the previous Close. This ensures the EOF records
3614 // have been written to disk before we attempt to read them independently.
3615 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3616 base::FilePath entry_file0_path = cache_path_.AppendASCII(
3617 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3618 base::File entry_file0(entry_file0_path,
3619 base::File::FLAG_READ | base::File::FLAG_OPEN);
3620 ASSERT_TRUE(entry_file0.IsValid());
3622 int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
3623 int sparse_data_size = 0;
3624 disk_cache::SimpleEntryStat entry_stat(
3625 base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
3626 int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
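// |eof_offset| is derived from the stream sizes recorded in |entry_stat| and
// should point at the EOF record that the previous Close() wrote for stream 0
// in this file.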
3627 disk_cache::SimpleFileEOF eof_record;
3628 ASSERT_EQ(static_cast<int>(sizeof(eof_record)),
3629 entry_file0.Read(eof_offset, reinterpret_cast<char*>(&eof_record),
3630 sizeof(eof_record)));
3631 EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number);
3632 EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) ==
3633 disk_cache::SimpleFileEOF::FLAG_HAS_CRC32);
3635 buffer_read = new net::IOBuffer(kSize);
3636 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
3637 EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
3639 // Shrink stream1.
3640 stream1_size = 50;
3641 EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true));
3642 EXPECT_EQ(stream1_size, entry->GetDataSize(1));
3643 entry->Close();
3645 // Check that stream0 data has not been modified.
3646 buffer_read = new net::IOBuffer(kSize);
3647 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3648 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
3649 EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
3650 entry->Close();
3651 entry = NULL;
3652 }
3654 // Test that writing within the range for which the CRC has already been
3655 // computed will properly invalidate the computed CRC.
3656 TEST_F(DiskCacheEntryTest, SimpleCacheCRCRewrite) {
3657 // Test sequence:
3658 // Create, Write (big data), Write (small data in the middle), Close.
3659 // Open, Read (all), Close.
3660 SetSimpleCacheMode();
3661 InitCache();
3662 disk_cache::Entry* null = NULL;
3663 const char key[] = "the first key";
3665 const int kHalfSize = 200;
3666 const int kSize = 2 * kHalfSize;
3667 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3668 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kHalfSize));
3669 CacheTestFillBuffer(buffer1->data(), kSize, false);
3670 CacheTestFillBuffer(buffer2->data(), kHalfSize, false);
3672 disk_cache::Entry* entry = NULL;
3673 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3674 EXPECT_NE(null, entry);
3675 entry->Close();
3677 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3678 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3679 int offset = 0;
3680 int buf_len = kSize;
3682 EXPECT_EQ(buf_len,
3683 WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3684 offset = kHalfSize;
3685 buf_len = kHalfSize;
3686 EXPECT_EQ(buf_len,
3687 WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3688 entry->Close();
3690 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3692 scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
3693 EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
3694 EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
3695 EXPECT_EQ(
3696 0,
3697 memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize, kHalfSize));
3699 entry->Close();
3700 }
3701 }
3703 bool DiskCacheEntryTest::SimpleCacheThirdStreamFileExists(const char* key) {
3704 int third_stream_file_index =
3705 disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
3706 base::FilePath third_stream_file_path = cache_path_.AppendASCII(
3707 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
3708 key, third_stream_file_index));
3709 return PathExists(third_stream_file_path);
3710 }
3712 void DiskCacheEntryTest::SyncDoomEntry(const char* key) {
3713 net::TestCompletionCallback callback;
3714 cache_->DoomEntry(key, callback.callback());
3715 callback.WaitForResult();
3716 }
3718 // Check that a newly-created entry with no third-stream writes omits the
3719 // third stream file.
3720 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream1) {
3721 SetSimpleCacheMode();
3722 InitCache();
3724 const char key[] = "key";
3726 disk_cache::Entry* entry;
3728 // Create entry and close without writing: third stream file should be
3729 // omitted, since the stream is empty.
3730 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3731 entry->Close();
3732 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3734 SyncDoomEntry(key);
3735 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3736 }
3738 // Check that a newly-created entry with only a single zero-offset, zero-length
3739 // write omits the third stream file.
3740 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream2) {
3741 SetSimpleCacheMode();
3742 InitCache();
3744 const int kHalfSize = 8;
3745 const int kSize = kHalfSize * 2;
3746 const char key[] = "key";
3747 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3748 CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3750 disk_cache::Entry* entry;
3752 // Create entry, write empty buffer to third stream, and close: third stream
3753 // should still be omitted, since the entry ignores writes that don't modify
3754 // data or change the length.
3755 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3756 EXPECT_EQ(0, WriteData(entry, 2, 0, buffer.get(), 0, true));
3757 entry->Close();
3758 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3760 SyncDoomEntry(key);
3761 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3762 }
3764 // Check that we can read back data written to the third stream.
3765 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream3) {
3766 SetSimpleCacheMode();
3767 InitCache();
3769 const int kHalfSize = 8;
3770 const int kSize = kHalfSize * 2;
3771 const char key[] = "key";
3772 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3773 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3774 CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
3776 disk_cache::Entry* entry;
3778 // Create entry, write data to third stream, and close: third stream should
3779 // not be omitted, since it contains data. Re-open entry and ensure there
3780 // are that many bytes in the third stream.
3781 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3782 EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
3783 entry->Close();
3784 EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3786 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3787 EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2.get(), kSize));
3788 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
3789 entry->Close();
3790 EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3792 SyncDoomEntry(key);
3793 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3794 }
3796 // Check that we remove the third stream file upon opening an entry and finding
3797 // the third stream empty. (This is the upgrade path for entries written
3798 // before the third stream was optional.)
3799 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
3800 SetSimpleCacheMode();
3801 InitCache();
3803 const int kHalfSize = 8;
3804 const int kSize = kHalfSize * 2;
3805 const char key[] = "key";
3806 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3807 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3808 CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
3810 disk_cache::Entry* entry;
3812 // Create entry, write data to third stream, truncate third stream back to
3813 // empty, and close: third stream will not initially be omitted, since the
3814 // entry creates the file when the first significant write comes in, and only
3815 // removes it on open if it is empty. Reopen, ensure that the file is
3816 // deleted, and that there's no data in the third stream.
3817 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3818 EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
3819 EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1.get(), 0, true));
3820 entry->Close();
3821 EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3823 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3824 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3825 EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2.get(), kSize));
3826 entry->Close();
3827 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3829 SyncDoomEntry(key);
3830 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3831 }
3833 // Check that we don't accidentally create the third stream file once the entry
3834 // has been doomed.
3835 TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
3836 SetSimpleCacheMode();
3837 InitCache();
3839 const int kHalfSize = 8;
3840 const int kSize = kHalfSize * 2;
3841 const char key[] = "key";
3842 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3843 CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3845 disk_cache::Entry* entry;
3847 // Create entry, doom entry, write data to third stream, and close: third
3848 // stream should not exist. (Note: We don't care if the write fails, just
3849 // that it doesn't cause the file to be created on disk.)
3850 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3851 entry->Doom();
3852 WriteData(entry, 2, 0, buffer.get(), kHalfSize, true);
3853 entry->Close();
3854 EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3855 }
3857 // There could be a race between Doom and an optimistic write.
3858 TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
3859 // Test sequence:
3860 // Create, first Write, second Write, Close.
3861 // Open, Close.
3862 SetSimpleCacheMode();
3863 InitCache();
3864 disk_cache::Entry* null = NULL;
3865 const char key[] = "the first key";
3867 const int kSize = 200;
3868 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3869 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3870 CacheTestFillBuffer(buffer1->data(), kSize, false);
3871 CacheTestFillBuffer(buffer2->data(), kSize, false);
3873 // The race only happens on stream 1 and stream 2.
3874 for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3875 ASSERT_EQ(net::OK, DoomAllEntries());
3876 disk_cache::Entry* entry = NULL;
3878 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3879 EXPECT_NE(null, entry);
3880 entry->Close();
3881 entry = NULL;
3883 ASSERT_EQ(net::OK, DoomAllEntries());
3884 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3885 EXPECT_NE(null, entry);
3887 int offset = 0;
3888 int buf_len = kSize;
3889 // This write should not be optimistic (since create is).
3890 EXPECT_EQ(buf_len,
3891 WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3893 offset = kSize;
3894 // This write should be optimistic.
3895 EXPECT_EQ(buf_len,
3896 WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3897 entry->Close();
3899 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3900 EXPECT_NE(null, entry);
3902 entry->Close();
3903 entry = NULL;
3904 }
3905 }
3907 // Tests for a regression in crbug.com/317138, in which deleting an already
3908 // doomed entry was removing the active entry from the index.
3909 TEST_F(DiskCacheEntryTest, SimpleCachePreserveActiveEntries) {
3910 SetSimpleCacheMode();
3911 InitCache();
3913 disk_cache::Entry* null = NULL;
3915 const char key[] = "this is a key";
3917 disk_cache::Entry* entry1 = NULL;
3918 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3919 ScopedEntryPtr entry1_closer(entry1);
3920 EXPECT_NE(null, entry1);
3921 entry1->Doom();
3923 disk_cache::Entry* entry2 = NULL;
3924 ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3925 ScopedEntryPtr entry2_closer(entry2);
3926 EXPECT_NE(null, entry2);
3927 entry2_closer.reset();
3929 // Closing then reopening entry2 ensures that entry2 is serialized, and so
3930 // it can be opened from files without error.
3931 entry2 = NULL;
3932 ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
3933 EXPECT_NE(null, entry2);
3934 entry2_closer.reset(entry2);
3936 scoped_refptr<disk_cache::SimpleEntryImpl>
3937 entry1_refptr = static_cast<disk_cache::SimpleEntryImpl*>(entry1);
3939 // If crbug.com/317138 has regressed, this will remove |entry2| from
3940 // the backend's |active_entries_| while |entry2| is still alive and its
3941 // files are still on disk.
3942 entry1_closer.reset();
3943 entry1 = NULL;
3945 // Close does not have a callback. However, we need to be sure the close is
3946 // finished before we continue the test. We can take advantage of how the ref
3947 // counting of a SimpleEntryImpl works to fake out a callback: When the
3948 // last Close() call is made to an entry, an IO operation is sent to the
3949 // synchronous entry to close the platform files. This IO operation holds a
3950 // ref pointer to the entry, which expires when the operation is done. So,
3951 // we take a refpointer, and watch the SimpleEntry object until it has only
3952 // one ref; this indicates the IO operation is complete.
3953 while (!entry1_refptr->HasOneRef()) {
3954 base::PlatformThread::YieldCurrentThread();
3955 base::MessageLoop::current()->RunUntilIdle();
3956 }
3957 entry1_refptr = NULL;
3959 // In the bug case, this new entry ends up being a duplicate object pointing
3960 // at the same underlying files.
3961 disk_cache::Entry* entry3 = NULL;
3962 EXPECT_EQ(net::OK, OpenEntry(key, &entry3));
3963 ScopedEntryPtr entry3_closer(entry3);
3964 EXPECT_NE(null, entry3);
3966 // The test passes if these two dooms do not crash.
3967 entry2->Doom();
3968 entry3->Doom();
3969 }
3971 TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
3972 SetSimpleCacheMode();
3973 InitCache();
3974 BasicSparseIO();
3975 }
3977 TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
3978 SetSimpleCacheMode();
3979 InitCache();
3980 HugeSparseIO();
3981 }
3983 TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
3984 SetSimpleCacheMode();
3985 InitCache();
3986 GetAvailableRange();
3987 }
3989 TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheCouldBeSparse) {
3990 SetSimpleCacheMode();
3991 InitCache();
3992 CouldBeSparse();
3993 }
3995 TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
3996 SetSimpleCacheMode();
3997 InitCache();
3998 UpdateSparseEntry();
3999 }
4001 TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
4002 SetSimpleCacheMode();
4003 InitCache();
4004 DoomSparseEntry();
4005 }
4007 TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
4008 SetSimpleCacheMode();
4009 InitCache();
4010 PartialSparseEntry();
4011 }
4013 TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
4014 const int kSize = 1024;
4016 SetSimpleCacheMode();
4017 // An entry is allowed sparse data 1/10 the size of the cache, so this size
4018 // allows for one |kSize|-sized range plus overhead, but not two ranges.
4019 SetMaxSize(kSize * 15);
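// Assuming the 1/10 rule above, the per-entry sparse budget is
// kSize * 15 / 10 == 1.5 * kSize: room for one kSize range plus its
// bookkeeping, but less than the 2 * kSize needed for two full ranges.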
4020 InitCache();
4022 const char key[] = "key";
4023 disk_cache::Entry* null = NULL;
4024 disk_cache::Entry* entry;
4025 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
4026 EXPECT_NE(null, entry);
4028 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
4029 CacheTestFillBuffer(buffer->data(), kSize, false);
4030 net::TestCompletionCallback callback;
4031 int ret;
4033 // Verify initial conditions.
4034 ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
4035 EXPECT_EQ(0, callback.GetResult(ret));
4037 ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
4038 EXPECT_EQ(0, callback.GetResult(ret));
4040 // Write a range and make sure it reads back.
4041 ret = entry->WriteSparseData(0, buffer.get(), kSize, callback.callback());
4042 EXPECT_EQ(kSize, callback.GetResult(ret));
4044 ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
4045 EXPECT_EQ(kSize, callback.GetResult(ret));
4047 // Write another range and make sure it reads back.
4048 ret = entry->WriteSparseData(kSize, buffer.get(), kSize, callback.callback());
4049 EXPECT_EQ(kSize, callback.GetResult(ret));
4051 ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
4052 EXPECT_EQ(kSize, callback.GetResult(ret));
4054 // Make sure the first range was removed when the second was written.
4055 ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
4056 EXPECT_EQ(0, callback.GetResult(ret));
4058 entry->Close();
4059 }