net/disk_cache/backend_unittest.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/file_util.h"
7 #include "base/metrics/field_trial.h"
8 #include "base/port.h"
9 #include "base/strings/string_util.h"
10 #include "base/strings/stringprintf.h"
11 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
12 #include "base/threading/platform_thread.h"
13 #include "base/threading/thread_restrictions.h"
14 #include "net/base/cache_type.h"
15 #include "net/base/io_buffer.h"
16 #include "net/base/net_errors.h"
17 #include "net/base/test_completion_callback.h"
18 #include "net/disk_cache/backend_impl.h"
19 #include "net/disk_cache/cache_util.h"
20 #include "net/disk_cache/disk_cache_test_base.h"
21 #include "net/disk_cache/disk_cache_test_util.h"
22 #include "net/disk_cache/entry_impl.h"
23 #include "net/disk_cache/experiments.h"
24 #include "net/disk_cache/histogram_macros.h"
25 #include "net/disk_cache/mapped_file.h"
26 #include "net/disk_cache/mem_backend_impl.h"
27 #include "net/disk_cache/simple/simple_backend_impl.h"
28 #include "net/disk_cache/simple/simple_entry_format.h"
29 #include "net/disk_cache/simple/simple_test_util.h"
30 #include "net/disk_cache/simple/simple_util.h"
31 #include "net/disk_cache/tracing_cache_backend.h"
32 #include "testing/gtest/include/gtest/gtest.h"
34 #if defined(OS_WIN)
35 #include "base/win/scoped_handle.h"
36 #endif
38 using base::Time;
40 namespace {
42 const char kExistingEntryKey[] = "existing entry key";
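// Creates a blockfile cache at |cache_path| containing a single entry keyed by
// kExistingEntryKey, and returns the open backend (an empty scoped_ptr on
// failure). A minimal usage sketch, mirroring the SimpleCacheControl* tests
// further down in this file:
//
//   scoped_ptr<disk_cache::BackendImpl> cache =
//       CreateExistingEntryCache(cache_thread, cache_path_);
//   ASSERT_TRUE(cache.get());
//   cache.reset();  // Release the backend before reusing |cache_path_|.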
44 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
45 const base::Thread& cache_thread,
46 base::FilePath& cache_path) {
47 net::TestCompletionCallback cb;
49 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
50 cache_path, cache_thread.message_loop_proxy(), NULL));
51 int rv = cache->Init(cb.callback());
52 if (cb.GetResult(rv) != net::OK)
53 return scoped_ptr<disk_cache::BackendImpl>();
55 disk_cache::Entry* entry = NULL;
56 rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
57 if (cb.GetResult(rv) != net::OK)
58 return scoped_ptr<disk_cache::BackendImpl>();
59 entry->Close();
61 return cache.Pass();
64 } // namespace
66 // Tests that can run with different types of caches.
67 class DiskCacheBackendTest : public DiskCacheTestWithCache {
68 protected:
69 void BackendBasics();
70 void BackendKeying();
71 void BackendShutdownWithPendingFileIO(bool fast);
72 void BackendShutdownWithPendingIO(bool fast);
73 void BackendShutdownWithPendingCreate(bool fast);
74 void BackendSetSize();
75 void BackendLoad();
76 void BackendChain();
77 void BackendValidEntry();
78 void BackendInvalidEntry();
79 void BackendInvalidEntryRead();
80 void BackendInvalidEntryWithLoad();
81 void BackendTrimInvalidEntry();
82 void BackendTrimInvalidEntry2();
83 void BackendEnumerations();
84 void BackendEnumerations2();
85 void BackendInvalidEntryEnumeration();
86 void BackendFixEnumerators();
87 void BackendDoomRecent();
// Adds 5 sparse entries. If not NULL, |doomed_start| and |doomed_end| will be
// filled with timestamps used by DoomEntriesSince and DoomEntriesBetween.
// There are 4 entries after |doomed_start| and 2 after |doomed_end|.
92 void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
94 void BackendDoomBetween();
95 void BackendTransaction(const std::string& name, int num_entries, bool load);
96 void BackendRecoverInsert();
97 void BackendRecoverRemove();
98 void BackendRecoverWithEviction();
99 void BackendInvalidEntry2();
100 void BackendInvalidEntry3();
101 void BackendInvalidEntry7();
102 void BackendInvalidEntry8();
103 void BackendInvalidEntry9(bool eviction);
104 void BackendInvalidEntry10(bool eviction);
105 void BackendInvalidEntry11(bool eviction);
106 void BackendTrimInvalidEntry12();
107 void BackendDoomAll();
108 void BackendDoomAll2();
109 void BackendInvalidRankings();
110 void BackendInvalidRankings2();
111 void BackendDisable();
112 void BackendDisable2();
113 void BackendDisable3();
114 void BackendDisable4();
115 void TracingBackendBasics();
117 bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
118 bool EnumerateAndMatchKeys(int max_to_open,
119 void** iter,
120 std::set<std::string>* keys_to_match,
121 size_t* count);
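// The enumeration helpers above rely on the backend's iterator API, which is
// exercised throughout this file. For reference, the canonical loop (as used
// by the enumeration tests below) looks roughly like:
//
//   void* iter = NULL;
//   disk_cache::Entry* entry;
//   while (OpenNextEntry(&iter, &entry) == net::OK) {
//     // ... inspect |entry| ...
//     entry->Close();
//   }
//   cache_->EndEnumeration(&iter);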
124 void DiskCacheBackendTest::BackendBasics() {
125 InitCache();
126 disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
127 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
128 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
129 ASSERT_TRUE(NULL != entry1);
130 entry1->Close();
131 entry1 = NULL;
133 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
134 ASSERT_TRUE(NULL != entry1);
135 entry1->Close();
136 entry1 = NULL;
138 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
139 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
140 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
141 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
142 ASSERT_TRUE(NULL != entry1);
143 ASSERT_TRUE(NULL != entry2);
144 EXPECT_EQ(2, cache_->GetEntryCount());
146 disk_cache::Entry* entry3 = NULL;
147 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
148 ASSERT_TRUE(NULL != entry3);
149 EXPECT_TRUE(entry2 == entry3);
150 EXPECT_EQ(2, cache_->GetEntryCount());
152 EXPECT_EQ(net::OK, DoomEntry("some other key"));
153 EXPECT_EQ(1, cache_->GetEntryCount());
154 entry1->Close();
155 entry2->Close();
156 entry3->Close();
158 EXPECT_EQ(net::OK, DoomEntry("the first key"));
159 EXPECT_EQ(0, cache_->GetEntryCount());
161 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
162 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
163 entry1->Doom();
164 entry1->Close();
165 EXPECT_EQ(net::OK, DoomEntry("some other key"));
166 EXPECT_EQ(0, cache_->GetEntryCount());
167 entry2->Close();
170 TEST_F(DiskCacheBackendTest, Basics) {
171 BackendBasics();
174 TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
175 SetNewEviction();
176 BackendBasics();
179 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
180 SetMemoryOnlyMode();
181 BackendBasics();
184 TEST_F(DiskCacheBackendTest, AppCacheBasics) {
185 SetCacheType(net::APP_CACHE);
186 BackendBasics();
189 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
190 SetCacheType(net::SHADER_CACHE);
191 BackendBasics();
194 void DiskCacheBackendTest::BackendKeying() {
195 InitCache();
196 const char* kName1 = "the first key";
197 const char* kName2 = "the first Key";
198 disk_cache::Entry *entry1, *entry2;
199 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
201 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
202 EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
203 entry2->Close();
205 char buffer[30];
206 base::strlcpy(buffer, kName1, arraysize(buffer));
207 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
208 EXPECT_TRUE(entry1 == entry2);
209 entry2->Close();
211 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
212 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
213 EXPECT_TRUE(entry1 == entry2);
214 entry2->Close();
216 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
217 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
218 EXPECT_TRUE(entry1 == entry2);
219 entry2->Close();
221 // Now verify long keys.
222 char buffer2[20000];
223 memset(buffer2, 's', sizeof(buffer2));
224 buffer2[1023] = '\0';
225 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
226 entry2->Close();
228 buffer2[1023] = 'g';
229 buffer2[19999] = '\0';
230 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
231 entry2->Close();
232 entry1->Close();
235 TEST_F(DiskCacheBackendTest, Keying) {
236 BackendKeying();
239 TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
240 SetNewEviction();
241 BackendKeying();
244 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
245 SetMemoryOnlyMode();
246 BackendKeying();
249 TEST_F(DiskCacheBackendTest, AppCacheKeying) {
250 SetCacheType(net::APP_CACHE);
251 BackendKeying();
254 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
255 SetCacheType(net::SHADER_CACHE);
256 BackendKeying();
259 TEST_F(DiskCacheTest, CreateBackend) {
260 net::TestCompletionCallback cb;
263 ASSERT_TRUE(CleanupCacheDir());
264 base::Thread cache_thread("CacheThread");
265 ASSERT_TRUE(cache_thread.StartWithOptions(
266 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
268 // Test the private factory method(s).
269 scoped_ptr<disk_cache::Backend> cache;
270 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
271 ASSERT_TRUE(cache.get());
272 cache.reset();
274 // Now test the public API.
int rv =
    disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                   net::CACHE_BACKEND_DEFAULT,
                                   cache_path_,
                                   0,  // 0 == use the default max size.
                                   false,
                                   cache_thread.message_loop_proxy().get(),
                                   NULL,
                                   &cache,
                                   cb.callback());
285 ASSERT_EQ(net::OK, cb.GetResult(rv));
286 ASSERT_TRUE(cache.get());
287 cache.reset();
289 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
290 net::CACHE_BACKEND_DEFAULT,
291 base::FilePath(), 0,
292 false, NULL, NULL, &cache,
293 cb.callback());
294 ASSERT_EQ(net::OK, cb.GetResult(rv));
295 ASSERT_TRUE(cache.get());
296 cache.reset();
299 base::MessageLoop::current()->RunUntilIdle();
302 // Tests that |BackendImpl| fails to initialize with a missing file.
303 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
304 ASSERT_TRUE(CopyTestCache("bad_entry"));
305 base::FilePath filename = cache_path_.AppendASCII("data_1");
306 base::DeleteFile(filename, false);
307 base::Thread cache_thread("CacheThread");
308 ASSERT_TRUE(cache_thread.StartWithOptions(
309 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
310 net::TestCompletionCallback cb;
312 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
313 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
314 cache_path_, cache_thread.message_loop_proxy().get(), NULL));
315 int rv = cache->Init(cb.callback());
316 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
317 base::ThreadRestrictions::SetIOAllowed(prev);
319 cache.reset();
320 DisableIntegrityCheck();
323 TEST_F(DiskCacheBackendTest, ExternalFiles) {
324 InitCache();
// First, let's create a file in the cache folder.
326 base::FilePath filename = cache_path_.AppendASCII("f_000001");
328 const int kSize = 50;
329 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
330 CacheTestFillBuffer(buffer1->data(), kSize, false);
331 ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize));
333 // Now let's create a file with the cache.
334 disk_cache::Entry* entry;
335 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
336 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
337 entry->Close();
339 // And verify that the first file is still there.
340 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
341 ASSERT_EQ(kSize, file_util::ReadFile(filename, buffer2->data(), kSize));
342 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
345 // Tests that we deal with file-level pending operations at destruction time.
346 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
347 net::TestCompletionCallback cb;
348 int rv;
351 ASSERT_TRUE(CleanupCacheDir());
352 base::Thread cache_thread("CacheThread");
353 ASSERT_TRUE(cache_thread.StartWithOptions(
354 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
356 uint32 flags = disk_cache::kNoBuffering;
357 if (!fast)
358 flags |= disk_cache::kNoRandom;
360 UseCurrentThread();
361 CreateBackend(flags, NULL);
363 disk_cache::EntryImpl* entry;
364 rv = cache_->CreateEntry(
365 "some key", reinterpret_cast<disk_cache::Entry**>(&entry),
366 cb.callback());
367 ASSERT_EQ(net::OK, cb.GetResult(rv));
369 const int kSize = 25000;
370 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
371 CacheTestFillBuffer(buffer->data(), kSize, false);
373 for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
// We are using the current thread as the cache thread because we want to
// be able to call this method directly, to make sure that it is the OS
// (instead of us switching threads) that returns ERR_IO_PENDING.
377 rv =
378 entry->WriteDataImpl(0, i, buffer.get(), kSize, cb.callback(), false);
379 if (rv == net::ERR_IO_PENDING)
380 break;
381 EXPECT_EQ(kSize, rv);
384 // Don't call Close() to avoid going through the queue or we'll deadlock
385 // waiting for the operation to finish.
386 entry->Release();
388 // The cache destructor will see one pending operation here.
389 cache_.reset();
391 if (rv == net::ERR_IO_PENDING) {
392 if (fast)
393 EXPECT_FALSE(cb.have_result());
394 else
395 EXPECT_TRUE(cb.have_result());
399 base::MessageLoop::current()->RunUntilIdle();
401 #if defined(OS_WIN)
// Wait for the actual operation to complete, or we'll keep a file handle that
// may cause issues later. Note that on POSIX systems, even though this test
// uses a single thread, the actual IO is posted to a worker thread and the
// cache destructor breaks the link to reach |cb| when the operation completes.
406 rv = cb.GetResult(rv);
407 #endif
410 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
411 BackendShutdownWithPendingFileIO(false);
414 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
415 // builds because they contain a lot of intentional memory leaks.
416 // The wrapper scripts used to run tests under Valgrind Memcheck and
417 // Heapchecker will also disable these tests under those tools. See:
418 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
419 // tools/heapcheck/net_unittests.gtest-heapcheck.txt
420 #if !defined(LEAK_SANITIZER)
421 // We'll be leaking from this test.
422 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
423 // The integrity test sets kNoRandom so there's a version mismatch if we don't
424 // force new eviction.
425 SetNewEviction();
426 BackendShutdownWithPendingFileIO(true);
428 #endif
430 // Tests that we deal with background-thread pending operations.
431 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
432 net::TestCompletionCallback cb;
435 ASSERT_TRUE(CleanupCacheDir());
436 base::Thread cache_thread("CacheThread");
437 ASSERT_TRUE(cache_thread.StartWithOptions(
438 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
440 uint32 flags = disk_cache::kNoBuffering;
441 if (!fast)
442 flags |= disk_cache::kNoRandom;
444 CreateBackend(flags, &cache_thread);
446 disk_cache::Entry* entry;
447 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
448 ASSERT_EQ(net::OK, cb.GetResult(rv));
450 entry->Close();
452 // The cache destructor will see one pending operation here.
453 cache_.reset();
456 base::MessageLoop::current()->RunUntilIdle();
459 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
460 BackendShutdownWithPendingIO(false);
463 #if !defined(LEAK_SANITIZER)
464 // We'll be leaking from this test.
465 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
466 // The integrity test sets kNoRandom so there's a version mismatch if we don't
467 // force new eviction.
468 SetNewEviction();
469 BackendShutdownWithPendingIO(true);
471 #endif
473 // Tests that we deal with create-type pending operations.
474 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
475 net::TestCompletionCallback cb;
478 ASSERT_TRUE(CleanupCacheDir());
479 base::Thread cache_thread("CacheThread");
480 ASSERT_TRUE(cache_thread.StartWithOptions(
481 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
483 disk_cache::BackendFlags flags =
484 fast ? disk_cache::kNone : disk_cache::kNoRandom;
485 CreateBackend(flags, &cache_thread);
487 disk_cache::Entry* entry;
488 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
489 ASSERT_EQ(net::ERR_IO_PENDING, rv);
491 cache_.reset();
492 EXPECT_FALSE(cb.have_result());
495 base::MessageLoop::current()->RunUntilIdle();
498 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
499 BackendShutdownWithPendingCreate(false);
502 #if !defined(LEAK_SANITIZER)
503 // We'll be leaking an entry from this test.
504 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
505 // The integrity test sets kNoRandom so there's a version mismatch if we don't
506 // force new eviction.
507 SetNewEviction();
508 BackendShutdownWithPendingCreate(true);
510 #endif
512 TEST_F(DiskCacheTest, TruncatedIndex) {
513 ASSERT_TRUE(CleanupCacheDir());
514 base::FilePath index = cache_path_.AppendASCII("index");
515 ASSERT_EQ(5, file_util::WriteFile(index, "hello", 5));
517 base::Thread cache_thread("CacheThread");
518 ASSERT_TRUE(cache_thread.StartWithOptions(
519 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
520 net::TestCompletionCallback cb;
522 scoped_ptr<disk_cache::Backend> backend;
int rv =
    disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                   net::CACHE_BACKEND_BLOCKFILE,
                                   cache_path_,
                                   0,
                                   false,
                                   cache_thread.message_loop_proxy().get(),
                                   NULL,
                                   &backend,
                                   cb.callback());
533 ASSERT_NE(net::OK, cb.GetResult(rv));
535 ASSERT_FALSE(backend);
538 void DiskCacheBackendTest::BackendSetSize() {
539 const int cache_size = 0x10000; // 64 kB
540 SetMaxSize(cache_size);
541 InitCache();
543 std::string first("some key");
544 std::string second("something else");
545 disk_cache::Entry* entry;
546 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
548 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
549 memset(buffer->data(), 0, cache_size);
550 EXPECT_EQ(cache_size / 10,
551 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
552 << "normal file";
554 EXPECT_EQ(net::ERR_FAILED,
555 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
556 << "file size above the limit";
558 // By doubling the total size, we make this file cacheable.
559 SetMaxSize(cache_size * 2);
560 EXPECT_EQ(cache_size / 5,
561 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
// Let's fill up the cache!
564 SetMaxSize(cache_size * 10);
565 EXPECT_EQ(cache_size * 3 / 4,
566 WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
567 entry->Close();
568 FlushQueueForTest();
570 SetMaxSize(cache_size);
572 // The cache is 95% full.
574 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
575 EXPECT_EQ(cache_size / 10,
576 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
578 disk_cache::Entry* entry2;
579 ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
580 EXPECT_EQ(cache_size / 10,
581 WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
582 entry2->Close(); // This will trigger the cache trim.
584 EXPECT_NE(net::OK, OpenEntry(first, &entry2));
586 FlushQueueForTest(); // Make sure that we are done trimming the cache.
587 FlushQueueForTest(); // We may have posted two tasks to evict stuff.
589 entry->Close();
590 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
591 EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
592 entry->Close();
595 TEST_F(DiskCacheBackendTest, SetSize) {
596 BackendSetSize();
599 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
600 SetNewEviction();
601 BackendSetSize();
604 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
605 SetMemoryOnlyMode();
606 BackendSetSize();
609 void DiskCacheBackendTest::BackendLoad() {
610 InitCache();
611 int seed = static_cast<int>(Time::Now().ToInternalValue());
612 srand(seed);
614 disk_cache::Entry* entries[100];
615 for (int i = 0; i < 100; i++) {
616 std::string key = GenerateKey(true);
617 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
619 EXPECT_EQ(100, cache_->GetEntryCount());
621 for (int i = 0; i < 100; i++) {
622 int source1 = rand() % 100;
623 int source2 = rand() % 100;
624 disk_cache::Entry* temp = entries[source1];
625 entries[source1] = entries[source2];
626 entries[source2] = temp;
629 for (int i = 0; i < 100; i++) {
630 disk_cache::Entry* entry;
631 ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
632 EXPECT_TRUE(entry == entries[i]);
633 entry->Close();
634 entries[i]->Doom();
635 entries[i]->Close();
637 FlushQueueForTest();
638 EXPECT_EQ(0, cache_->GetEntryCount());
641 TEST_F(DiskCacheBackendTest, Load) {
642 // Work with a tiny index table (16 entries)
643 SetMask(0xf);
644 SetMaxSize(0x100000);
645 BackendLoad();
648 TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
649 SetNewEviction();
650 // Work with a tiny index table (16 entries)
651 SetMask(0xf);
652 SetMaxSize(0x100000);
653 BackendLoad();
656 TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
657 SetMaxSize(0x100000);
658 SetMemoryOnlyMode();
659 BackendLoad();
662 TEST_F(DiskCacheBackendTest, AppCacheLoad) {
663 SetCacheType(net::APP_CACHE);
664 // Work with a tiny index table (16 entries)
665 SetMask(0xf);
666 SetMaxSize(0x100000);
667 BackendLoad();
670 TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
671 SetCacheType(net::SHADER_CACHE);
672 // Work with a tiny index table (16 entries)
673 SetMask(0xf);
674 SetMaxSize(0x100000);
675 BackendLoad();
678 // Tests the chaining of an entry to the current head.
679 void DiskCacheBackendTest::BackendChain() {
680 SetMask(0x1); // 2-entry table.
681 SetMaxSize(0x3000); // 12 kB.
682 InitCache();
684 disk_cache::Entry* entry;
685 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
686 entry->Close();
687 ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
688 entry->Close();
691 TEST_F(DiskCacheBackendTest, Chain) {
692 BackendChain();
695 TEST_F(DiskCacheBackendTest, NewEvictionChain) {
696 SetNewEviction();
697 BackendChain();
700 TEST_F(DiskCacheBackendTest, AppCacheChain) {
701 SetCacheType(net::APP_CACHE);
702 BackendChain();
705 TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
706 SetCacheType(net::SHADER_CACHE);
707 BackendChain();
710 TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
711 SetNewEviction();
712 InitCache();
714 disk_cache::Entry* entry;
715 for (int i = 0; i < 100; i++) {
716 std::string name(base::StringPrintf("Key %d", i));
717 ASSERT_EQ(net::OK, CreateEntry(name, &entry));
718 entry->Close();
719 if (i < 90) {
720 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
721 ASSERT_EQ(net::OK, OpenEntry(name, &entry));
722 entry->Close();
726 // The first eviction must come from list 1 (10% limit), the second must come
727 // from list 0.
728 TrimForTest(false);
729 EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
730 TrimForTest(false);
731 EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
733 // Double check that we still have the list tails.
734 ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
735 entry->Close();
736 ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
737 entry->Close();
740 // Before looking for invalid entries, let's check a valid entry.
741 void DiskCacheBackendTest::BackendValidEntry() {
742 InitCache();
744 std::string key("Some key");
745 disk_cache::Entry* entry;
746 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
748 const int kSize = 50;
749 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
750 memset(buffer1->data(), 0, kSize);
751 base::strlcpy(buffer1->data(), "And the data to save", kSize);
752 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
753 entry->Close();
754 SimulateCrash();
756 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
758 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
759 memset(buffer2->data(), 0, kSize);
760 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
761 entry->Close();
762 EXPECT_STREQ(buffer1->data(), buffer2->data());
765 TEST_F(DiskCacheBackendTest, ValidEntry) {
766 BackendValidEntry();
769 TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
770 SetNewEviction();
771 BackendValidEntry();
// The same logic as the previous test (ValidEntry), but this time we force the
// entry to be invalid, simulating a crash in the middle.
776 // We'll be leaking memory from this test.
777 void DiskCacheBackendTest::BackendInvalidEntry() {
778 InitCache();
780 std::string key("Some key");
781 disk_cache::Entry* entry;
782 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
784 const int kSize = 50;
785 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
786 memset(buffer->data(), 0, kSize);
787 base::strlcpy(buffer->data(), "And the data to save", kSize);
788 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
789 SimulateCrash();
791 EXPECT_NE(net::OK, OpenEntry(key, &entry));
792 EXPECT_EQ(0, cache_->GetEntryCount());
795 #if !defined(LEAK_SANITIZER)
796 // We'll be leaking memory from this test.
797 TEST_F(DiskCacheBackendTest, InvalidEntry) {
798 BackendInvalidEntry();
801 // We'll be leaking memory from this test.
802 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
803 SetNewEviction();
804 BackendInvalidEntry();
807 // We'll be leaking memory from this test.
808 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
809 SetCacheType(net::APP_CACHE);
810 BackendInvalidEntry();
813 // We'll be leaking memory from this test.
814 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
815 SetCacheType(net::SHADER_CACHE);
816 BackendInvalidEntry();
819 // Almost the same test, but this time crash the cache after reading an entry.
820 // We'll be leaking memory from this test.
821 void DiskCacheBackendTest::BackendInvalidEntryRead() {
822 InitCache();
824 std::string key("Some key");
825 disk_cache::Entry* entry;
826 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
828 const int kSize = 50;
829 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
830 memset(buffer->data(), 0, kSize);
831 base::strlcpy(buffer->data(), "And the data to save", kSize);
832 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
833 entry->Close();
834 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
835 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
837 SimulateCrash();
839 if (type_ == net::APP_CACHE) {
840 // Reading an entry and crashing should not make it dirty.
841 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
842 EXPECT_EQ(1, cache_->GetEntryCount());
843 entry->Close();
844 } else {
845 EXPECT_NE(net::OK, OpenEntry(key, &entry));
846 EXPECT_EQ(0, cache_->GetEntryCount());
850 // We'll be leaking memory from this test.
851 TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
852 BackendInvalidEntryRead();
855 // We'll be leaking memory from this test.
856 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
857 SetNewEviction();
858 BackendInvalidEntryRead();
861 // We'll be leaking memory from this test.
862 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
863 SetCacheType(net::APP_CACHE);
864 BackendInvalidEntryRead();
867 // We'll be leaking memory from this test.
868 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
869 SetCacheType(net::SHADER_CACHE);
870 BackendInvalidEntryRead();
873 // We'll be leaking memory from this test.
874 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
875 // Work with a tiny index table (16 entries)
876 SetMask(0xf);
877 SetMaxSize(0x100000);
878 InitCache();
880 int seed = static_cast<int>(Time::Now().ToInternalValue());
881 srand(seed);
883 const int kNumEntries = 100;
884 disk_cache::Entry* entries[kNumEntries];
885 for (int i = 0; i < kNumEntries; i++) {
886 std::string key = GenerateKey(true);
887 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
889 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
891 for (int i = 0; i < kNumEntries; i++) {
892 int source1 = rand() % kNumEntries;
893 int source2 = rand() % kNumEntries;
894 disk_cache::Entry* temp = entries[source1];
895 entries[source1] = entries[source2];
896 entries[source2] = temp;
899 std::string keys[kNumEntries];
900 for (int i = 0; i < kNumEntries; i++) {
901 keys[i] = entries[i]->GetKey();
902 if (i < kNumEntries / 2)
903 entries[i]->Close();
906 SimulateCrash();
908 for (int i = kNumEntries / 2; i < kNumEntries; i++) {
909 disk_cache::Entry* entry;
910 EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
913 for (int i = 0; i < kNumEntries / 2; i++) {
914 disk_cache::Entry* entry;
915 ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
916 entry->Close();
919 EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
922 // We'll be leaking memory from this test.
923 TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
924 BackendInvalidEntryWithLoad();
927 // We'll be leaking memory from this test.
928 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
929 SetNewEviction();
930 BackendInvalidEntryWithLoad();
933 // We'll be leaking memory from this test.
934 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
935 SetCacheType(net::APP_CACHE);
936 BackendInvalidEntryWithLoad();
939 // We'll be leaking memory from this test.
940 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
941 SetCacheType(net::SHADER_CACHE);
942 BackendInvalidEntryWithLoad();
945 // We'll be leaking memory from this test.
946 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
947 const int kSize = 0x3000; // 12 kB
948 SetMaxSize(kSize * 10);
949 InitCache();
951 std::string first("some key");
952 std::string second("something else");
953 disk_cache::Entry* entry;
954 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
956 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
957 memset(buffer->data(), 0, kSize);
958 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
960 // Simulate a crash.
961 SimulateCrash();
963 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
964 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
966 EXPECT_EQ(2, cache_->GetEntryCount());
967 SetMaxSize(kSize);
968 entry->Close(); // Trim the cache.
969 FlushQueueForTest();
// If we evicted the entry in less than 20 ms, we have one entry in the cache;
// if it took longer than that, we posted a task and we'll delete the second
// entry too.
974 base::MessageLoop::current()->RunUntilIdle();
// This may not be thread-safe in general, but for now it's OK, so add some
// ThreadSanitizer annotations to ignore data races on cache_.
// See http://crbug.com/55970
979 ANNOTATE_IGNORE_READS_BEGIN();
980 EXPECT_GE(1, cache_->GetEntryCount());
981 ANNOTATE_IGNORE_READS_END();
983 EXPECT_NE(net::OK, OpenEntry(first, &entry));
986 // We'll be leaking memory from this test.
987 TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
988 BackendTrimInvalidEntry();
991 // We'll be leaking memory from this test.
992 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
993 SetNewEviction();
994 BackendTrimInvalidEntry();
997 // We'll be leaking memory from this test.
998 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
999 SetMask(0xf); // 16-entry table.
1001 const int kSize = 0x3000; // 12 kB
1002 SetMaxSize(kSize * 40);
1003 InitCache();
1005 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1006 memset(buffer->data(), 0, kSize);
1007 disk_cache::Entry* entry;
1009 // Writing 32 entries to this cache chains most of them.
1010 for (int i = 0; i < 32; i++) {
1011 std::string key(base::StringPrintf("some key %d", i));
1012 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1013 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1014 entry->Close();
1015 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1016 // Note that we are not closing the entries.
1019 // Simulate a crash.
1020 SimulateCrash();
1022 ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
1023 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1025 FlushQueueForTest();
1026 EXPECT_EQ(33, cache_->GetEntryCount());
1027 SetMaxSize(kSize);
// For the new eviction code, all corrupt entries are on the second list, so
// they are not going away that easily.
1031 if (new_eviction_) {
1032 EXPECT_EQ(net::OK, DoomAllEntries());
1035 entry->Close(); // Trim the cache.
1036 FlushQueueForTest();
1038 // We may abort the eviction before cleaning up everything.
1039 base::MessageLoop::current()->RunUntilIdle();
1040 FlushQueueForTest();
// To be clear: we may still have eviction tasks running at this time, so the
// number of entries may change while we read it.
1043 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1044 EXPECT_GE(30, cache_->GetEntryCount());
1045 ANNOTATE_IGNORE_READS_AND_WRITES_END();
1048 // We'll be leaking memory from this test.
1049 TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
1050 BackendTrimInvalidEntry2();
1053 // We'll be leaking memory from this test.
1054 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
1055 SetNewEviction();
1056 BackendTrimInvalidEntry2();
1058 #endif // !defined(LEAK_SANITIZER)
1060 void DiskCacheBackendTest::BackendEnumerations() {
1061 InitCache();
1062 Time initial = Time::Now();
1064 const int kNumEntries = 100;
1065 for (int i = 0; i < kNumEntries; i++) {
1066 std::string key = GenerateKey(true);
1067 disk_cache::Entry* entry;
1068 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1069 entry->Close();
1071 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1072 Time final = Time::Now();
1074 disk_cache::Entry* entry;
1075 void* iter = NULL;
1076 int count = 0;
1077 Time last_modified[kNumEntries];
1078 Time last_used[kNumEntries];
1079 while (OpenNextEntry(&iter, &entry) == net::OK) {
1080 ASSERT_TRUE(NULL != entry);
1081 if (count < kNumEntries) {
1082 last_modified[count] = entry->GetLastModified();
1083 last_used[count] = entry->GetLastUsed();
1084 EXPECT_TRUE(initial <= last_modified[count]);
1085 EXPECT_TRUE(final >= last_modified[count]);
1088 entry->Close();
1089 count++;
1091 EXPECT_EQ(kNumEntries, count);
1093 iter = NULL;
1094 count = 0;
1095 // The previous enumeration should not have changed the timestamps.
1096 while (OpenNextEntry(&iter, &entry) == net::OK) {
1097 ASSERT_TRUE(NULL != entry);
1098 if (count < kNumEntries) {
1099 EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1100 EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1102 entry->Close();
1103 count++;
1105 EXPECT_EQ(kNumEntries, count);
1108 TEST_F(DiskCacheBackendTest, Enumerations) {
1109 BackendEnumerations();
1112 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
1113 SetNewEviction();
1114 BackendEnumerations();
1117 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
1118 SetMemoryOnlyMode();
1119 BackendEnumerations();
1122 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
1123 SetCacheType(net::SHADER_CACHE);
1124 BackendEnumerations();
1127 TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
1128 SetCacheType(net::APP_CACHE);
1129 BackendEnumerations();
1132 // Verifies enumerations while entries are open.
1133 void DiskCacheBackendTest::BackendEnumerations2() {
1134 InitCache();
1135 const std::string first("first");
1136 const std::string second("second");
1137 disk_cache::Entry *entry1, *entry2;
1138 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1139 entry1->Close();
1140 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1141 entry2->Close();
1142 FlushQueueForTest();
1144 // Make sure that the timestamp is not the same.
1145 AddDelay();
1146 ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
1147 void* iter = NULL;
1148 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1149 EXPECT_EQ(entry2->GetKey(), second);
1151 // Two entries and the iterator pointing at "first".
1152 entry1->Close();
1153 entry2->Close();
1155 // The iterator should still be valid, so we should not crash.
1156 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1157 EXPECT_EQ(entry2->GetKey(), first);
1158 entry2->Close();
1159 cache_->EndEnumeration(&iter);
1161 // Modify the oldest entry and get the newest element.
1162 ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
1163 EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
1164 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1165 if (type_ == net::APP_CACHE) {
1166 // The list is not updated.
1167 EXPECT_EQ(entry2->GetKey(), second);
1168 } else {
1169 EXPECT_EQ(entry2->GetKey(), first);
1172 entry1->Close();
1173 entry2->Close();
1174 cache_->EndEnumeration(&iter);
1177 TEST_F(DiskCacheBackendTest, Enumerations2) {
1178 BackendEnumerations2();
1181 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
1182 SetNewEviction();
1183 BackendEnumerations2();
1186 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
1187 SetMemoryOnlyMode();
1188 BackendEnumerations2();
1191 TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
1192 SetCacheType(net::APP_CACHE);
1193 BackendEnumerations2();
1196 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
1197 SetCacheType(net::SHADER_CACHE);
1198 BackendEnumerations2();
1201 // Verify that ReadData calls do not update the LRU cache
1202 // when using the SHADER_CACHE type.
1203 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
1204 SetCacheType(net::SHADER_CACHE);
1205 InitCache();
1206 const std::string first("first");
1207 const std::string second("second");
1208 disk_cache::Entry *entry1, *entry2;
1209 const int kSize = 50;
1210 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1212 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1213 memset(buffer1->data(), 0, kSize);
1214 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1215 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1217 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1218 entry2->Close();
1220 FlushQueueForTest();
1222 // Make sure that the timestamp is not the same.
1223 AddDelay();
1225 // Read from the last item in the LRU.
1226 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1227 entry1->Close();
1229 void* iter = NULL;
1230 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1231 EXPECT_EQ(entry2->GetKey(), second);
1232 entry2->Close();
1233 cache_->EndEnumeration(&iter);
1236 #if !defined(LEAK_SANITIZER)
1237 // Verify handling of invalid entries while doing enumerations.
1238 // We'll be leaking memory from this test.
1239 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1240 InitCache();
1242 std::string key("Some key");
1243 disk_cache::Entry *entry, *entry1, *entry2;
1244 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
1246 const int kSize = 50;
1247 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1248 memset(buffer1->data(), 0, kSize);
1249 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1250 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1251 entry1->Close();
1252 ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
1253 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1255 std::string key2("Another key");
1256 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
1257 entry2->Close();
1258 ASSERT_EQ(2, cache_->GetEntryCount());
1260 SimulateCrash();
1262 void* iter = NULL;
1263 int count = 0;
1264 while (OpenNextEntry(&iter, &entry) == net::OK) {
1265 ASSERT_TRUE(NULL != entry);
1266 EXPECT_EQ(key2, entry->GetKey());
1267 entry->Close();
1268 count++;
1270 EXPECT_EQ(1, count);
1271 EXPECT_EQ(1, cache_->GetEntryCount());
1274 // We'll be leaking memory from this test.
1275 TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
1276 BackendInvalidEntryEnumeration();
1279 // We'll be leaking memory from this test.
1280 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
1281 SetNewEviction();
1282 BackendInvalidEntryEnumeration();
1284 #endif // !defined(LEAK_SANITIZER)
1286 // Tests that if for some reason entries are modified close to existing cache
1287 // iterators, we don't generate fatal errors or reset the cache.
1288 void DiskCacheBackendTest::BackendFixEnumerators() {
1289 InitCache();
1291 int seed = static_cast<int>(Time::Now().ToInternalValue());
1292 srand(seed);
1294 const int kNumEntries = 10;
1295 for (int i = 0; i < kNumEntries; i++) {
1296 std::string key = GenerateKey(true);
1297 disk_cache::Entry* entry;
1298 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1299 entry->Close();
1301 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1303 disk_cache::Entry *entry1, *entry2;
1304 void* iter1 = NULL;
1305 void* iter2 = NULL;
1306 ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
1307 ASSERT_TRUE(NULL != entry1);
1308 entry1->Close();
1309 entry1 = NULL;
1311 // Let's go to the middle of the list.
1312 for (int i = 0; i < kNumEntries / 2; i++) {
1313 if (entry1)
1314 entry1->Close();
1315 ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
1316 ASSERT_TRUE(NULL != entry1);
1318 ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1319 ASSERT_TRUE(NULL != entry2);
1320 entry2->Close();
// Messing with entry1 will modify entry2->next.
1324 entry1->Doom();
1325 ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1326 ASSERT_TRUE(NULL != entry2);
1328 // The link entry2->entry1 should be broken.
1329 EXPECT_NE(entry2->GetKey(), entry1->GetKey());
1330 entry1->Close();
1331 entry2->Close();
1333 // And the second iterator should keep working.
1334 ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1335 ASSERT_TRUE(NULL != entry2);
1336 entry2->Close();
1338 cache_->EndEnumeration(&iter1);
1339 cache_->EndEnumeration(&iter2);
1342 TEST_F(DiskCacheBackendTest, FixEnumerators) {
1343 BackendFixEnumerators();
1346 TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
1347 SetNewEviction();
1348 BackendFixEnumerators();
1351 void DiskCacheBackendTest::BackendDoomRecent() {
1352 InitCache();
1354 disk_cache::Entry *entry;
1355 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1356 entry->Close();
1357 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1358 entry->Close();
1359 FlushQueueForTest();
1361 AddDelay();
1362 Time middle = Time::Now();
1364 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1365 entry->Close();
1366 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1367 entry->Close();
1368 FlushQueueForTest();
1370 AddDelay();
1371 Time final = Time::Now();
1373 ASSERT_EQ(4, cache_->GetEntryCount());
1374 EXPECT_EQ(net::OK, DoomEntriesSince(final));
1375 ASSERT_EQ(4, cache_->GetEntryCount());
1377 EXPECT_EQ(net::OK, DoomEntriesSince(middle));
1378 ASSERT_EQ(2, cache_->GetEntryCount());
1380 ASSERT_EQ(net::OK, OpenEntry("second", &entry));
1381 entry->Close();
1384 TEST_F(DiskCacheBackendTest, DoomRecent) {
1385 BackendDoomRecent();
1388 TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
1389 SetNewEviction();
1390 BackendDoomRecent();
1393 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
1394 SetMemoryOnlyMode();
1395 BackendDoomRecent();
1398 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
1399 base::Time* doomed_end) {
1400 InitCache();
1402 const int kSize = 50;
// This must be greater than MemEntryImpl::kMaxSparseEntrySize.
1404 const int kOffset = 10 + 1024 * 1024;
1406 disk_cache::Entry* entry0 = NULL;
1407 disk_cache::Entry* entry1 = NULL;
1408 disk_cache::Entry* entry2 = NULL;
1410 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1411 CacheTestFillBuffer(buffer->data(), kSize, false);
1413 ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
1414 ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
1415 ASSERT_EQ(kSize,
1416 WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
1417 entry0->Close();
1419 FlushQueueForTest();
1420 AddDelay();
1421 if (doomed_start)
1422 *doomed_start = base::Time::Now();
1424 // Order in rankings list:
1425 // first_part1, first_part2, second_part1, second_part2
1426 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
1427 ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
1428 ASSERT_EQ(kSize,
1429 WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
1430 entry1->Close();
1432 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
1433 ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
1434 ASSERT_EQ(kSize,
1435 WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
1436 entry2->Close();
1438 FlushQueueForTest();
1439 AddDelay();
1440 if (doomed_end)
1441 *doomed_end = base::Time::Now();
1443 // Order in rankings list:
1444 // third_part1, fourth_part1, third_part2, fourth_part2
1445 disk_cache::Entry* entry3 = NULL;
1446 disk_cache::Entry* entry4 = NULL;
1447 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
1448 ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
1449 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
1450 ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
1451 ASSERT_EQ(kSize,
1452 WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
1453 ASSERT_EQ(kSize,
1454 WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
1455 entry3->Close();
1456 entry4->Close();
1458 FlushQueueForTest();
1459 AddDelay();
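// With the timestamps captured above, the *Sparse tests below expect
// DoomEntriesSince(*doomed_start) to leave only "zeroth" behind, and
// DoomEntriesBetween(*doomed_start, *doomed_end) to remove only "first" and
// "second". The blockfile backend also counts sparse child entries in
// GetEntryCount(), which is why its expected counts are larger than the
// memory-only ones.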
1462 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
1463 SetMemoryOnlyMode();
1464 base::Time start;
1465 InitSparseCache(&start, NULL);
1466 DoomEntriesSince(start);
1467 EXPECT_EQ(1, cache_->GetEntryCount());
1470 TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
1471 base::Time start;
1472 InitSparseCache(&start, NULL);
1473 DoomEntriesSince(start);
// NOTE: BackendImpl counts child entries in its GetEntryCount(), while
// MemBackendImpl does not. That's why the expected value here differs from
// MemoryOnlyDoomEntriesSinceSparse.
1477 EXPECT_EQ(3, cache_->GetEntryCount());
1480 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
1481 SetMemoryOnlyMode();
1482 InitSparseCache(NULL, NULL);
1483 EXPECT_EQ(net::OK, DoomAllEntries());
1484 EXPECT_EQ(0, cache_->GetEntryCount());
1487 TEST_F(DiskCacheBackendTest, DoomAllSparse) {
1488 InitSparseCache(NULL, NULL);
1489 EXPECT_EQ(net::OK, DoomAllEntries());
1490 EXPECT_EQ(0, cache_->GetEntryCount());
1493 void DiskCacheBackendTest::BackendDoomBetween() {
1494 InitCache();
1496 disk_cache::Entry *entry;
1497 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1498 entry->Close();
1499 FlushQueueForTest();
1501 AddDelay();
1502 Time middle_start = Time::Now();
1504 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1505 entry->Close();
1506 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1507 entry->Close();
1508 FlushQueueForTest();
1510 AddDelay();
1511 Time middle_end = Time::Now();
1513 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1514 entry->Close();
1515 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1516 entry->Close();
1517 FlushQueueForTest();
1519 AddDelay();
1520 Time final = Time::Now();
1522 ASSERT_EQ(4, cache_->GetEntryCount());
1523 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
1524 ASSERT_EQ(2, cache_->GetEntryCount());
1526 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1527 entry->Close();
1529 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
1530 ASSERT_EQ(1, cache_->GetEntryCount());
1532 ASSERT_EQ(net::OK, OpenEntry("first", &entry));
1533 entry->Close();
1536 TEST_F(DiskCacheBackendTest, DoomBetween) {
1537 BackendDoomBetween();
1540 TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
1541 SetNewEviction();
1542 BackendDoomBetween();
1545 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
1546 SetMemoryOnlyMode();
1547 BackendDoomBetween();
1550 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
1551 SetMemoryOnlyMode();
1552 base::Time start, end;
1553 InitSparseCache(&start, &end);
1554 DoomEntriesBetween(start, end);
1555 EXPECT_EQ(3, cache_->GetEntryCount());
1557 start = end;
1558 end = base::Time::Now();
1559 DoomEntriesBetween(start, end);
1560 EXPECT_EQ(1, cache_->GetEntryCount());
1563 TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
1564 base::Time start, end;
1565 InitSparseCache(&start, &end);
1566 DoomEntriesBetween(start, end);
1567 EXPECT_EQ(9, cache_->GetEntryCount());
1569 start = end;
1570 end = base::Time::Now();
1571 DoomEntriesBetween(start, end);
1572 EXPECT_EQ(3, cache_->GetEntryCount());
1575 void DiskCacheBackendTest::BackendTransaction(const std::string& name,
1576 int num_entries, bool load) {
1577 success_ = false;
1578 ASSERT_TRUE(CopyTestCache(name));
1579 DisableFirstCleanup();
1581 uint32 mask;
1582 if (load) {
1583 mask = 0xf;
1584 SetMaxSize(0x100000);
1585 } else {
1586 // Clear the settings from the previous run.
1587 mask = 0;
1588 SetMaxSize(0);
1590 SetMask(mask);
1592 InitCache();
1593 ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());
1595 std::string key("the first key");
1596 disk_cache::Entry* entry1;
1597 ASSERT_NE(net::OK, OpenEntry(key, &entry1));
1599 int actual = cache_->GetEntryCount();
1600 if (num_entries != actual) {
1601 ASSERT_TRUE(load);
1602 // If there is a heavy load, inserting an entry will make another entry
1603 // dirty (on the hash bucket) so two entries are removed.
1604 ASSERT_EQ(num_entries - 1, actual);
1607 cache_.reset();
1608 cache_impl_ = NULL;
1610 ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
1611 success_ = true;
1614 void DiskCacheBackendTest::BackendRecoverInsert() {
1615 // Tests with an empty cache.
1616 BackendTransaction("insert_empty1", 0, false);
1617 ASSERT_TRUE(success_) << "insert_empty1";
1618 BackendTransaction("insert_empty2", 0, false);
1619 ASSERT_TRUE(success_) << "insert_empty2";
1620 BackendTransaction("insert_empty3", 0, false);
1621 ASSERT_TRUE(success_) << "insert_empty3";
// Tests with one entry in the cache.
1624 BackendTransaction("insert_one1", 1, false);
1625 ASSERT_TRUE(success_) << "insert_one1";
1626 BackendTransaction("insert_one2", 1, false);
1627 ASSERT_TRUE(success_) << "insert_one2";
1628 BackendTransaction("insert_one3", 1, false);
1629 ASSERT_TRUE(success_) << "insert_one3";
// Tests with one hundred entries in the cache, tiny index.
1632 BackendTransaction("insert_load1", 100, true);
1633 ASSERT_TRUE(success_) << "insert_load1";
1634 BackendTransaction("insert_load2", 100, true);
1635 ASSERT_TRUE(success_) << "insert_load2";
1638 TEST_F(DiskCacheBackendTest, RecoverInsert) {
1639 BackendRecoverInsert();
1642 TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
1643 SetNewEviction();
1644 BackendRecoverInsert();
1647 void DiskCacheBackendTest::BackendRecoverRemove() {
1648 // Removing the only element.
1649 BackendTransaction("remove_one1", 0, false);
1650 ASSERT_TRUE(success_) << "remove_one1";
1651 BackendTransaction("remove_one2", 0, false);
1652 ASSERT_TRUE(success_) << "remove_one2";
1653 BackendTransaction("remove_one3", 0, false);
1654 ASSERT_TRUE(success_) << "remove_one3";
1656 // Removing the head.
1657 BackendTransaction("remove_head1", 1, false);
1658 ASSERT_TRUE(success_) << "remove_head1";
1659 BackendTransaction("remove_head2", 1, false);
1660 ASSERT_TRUE(success_) << "remove_head2";
1661 BackendTransaction("remove_head3", 1, false);
1662 ASSERT_TRUE(success_) << "remove_head3";
1664 // Removing the tail.
1665 BackendTransaction("remove_tail1", 1, false);
1666 ASSERT_TRUE(success_) << "remove_tail1";
1667 BackendTransaction("remove_tail2", 1, false);
1668 ASSERT_TRUE(success_) << "remove_tail2";
1669 BackendTransaction("remove_tail3", 1, false);
1670 ASSERT_TRUE(success_) << "remove_tail3";
// Removing with one hundred entries in the cache, tiny index.
1673 BackendTransaction("remove_load1", 100, true);
1674 ASSERT_TRUE(success_) << "remove_load1";
1675 BackendTransaction("remove_load2", 100, true);
1676 ASSERT_TRUE(success_) << "remove_load2";
1677 BackendTransaction("remove_load3", 100, true);
1678 ASSERT_TRUE(success_) << "remove_load3";
1680 // This case cannot be reverted.
1681 BackendTransaction("remove_one4", 0, false);
1682 ASSERT_TRUE(success_) << "remove_one4";
1683 BackendTransaction("remove_head4", 1, false);
1684 ASSERT_TRUE(success_) << "remove_head4";
1687 TEST_F(DiskCacheBackendTest, RecoverRemove) {
1688 BackendRecoverRemove();
1691 TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
1692 SetNewEviction();
1693 BackendRecoverRemove();
1696 void DiskCacheBackendTest::BackendRecoverWithEviction() {
1697 success_ = false;
1698 ASSERT_TRUE(CopyTestCache("insert_load1"));
1699 DisableFirstCleanup();
1701 SetMask(0xf);
1702 SetMaxSize(0x1000);
1704 // We should not crash here.
1705 InitCache();
1706 DisableIntegrityCheck();
1709 TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
1710 BackendRecoverWithEviction();
1713 TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
1714 SetNewEviction();
1715 BackendRecoverWithEviction();
// Tests that |BackendImpl| fails to start with the wrong cache version.
1719 TEST_F(DiskCacheTest, WrongVersion) {
1720 ASSERT_TRUE(CopyTestCache("wrong_version"));
1721 base::Thread cache_thread("CacheThread");
1722 ASSERT_TRUE(cache_thread.StartWithOptions(
1723 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1724 net::TestCompletionCallback cb;
1726 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1727 cache_path_, cache_thread.message_loop_proxy().get(), NULL));
1728 int rv = cache->Init(cb.callback());
1729 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
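// Entropy provider that always returns the same value, so that field trial
// group assignment is deterministic for the SimpleCacheControl* tests. Those
// tests also pin the group explicitly; the pattern (used verbatim below) is:
//
//   base::FieldTrialList field_trial_list(new BadEntropyProvider());
//   base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
//                                          "ExperimentControl");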
1732 class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
1733 public:
1734 virtual ~BadEntropyProvider() {}
1736 virtual double GetEntropyForTrial(const std::string& trial_name,
1737 uint32 randomization_seed) const OVERRIDE {
1738 return 0.5;
1742 // Tests that the disk cache successfully joins the control group, dropping the
1743 // existing cache in favour of a new empty cache.
1744 TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
1745 base::Thread cache_thread("CacheThread");
1746 ASSERT_TRUE(cache_thread.StartWithOptions(
1747 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1749 scoped_ptr<disk_cache::BackendImpl> cache =
1750 CreateExistingEntryCache(cache_thread, cache_path_);
1751 ASSERT_TRUE(cache.get());
1752 cache.reset();
1754 // Instantiate the SimpleCacheTrial, forcing this run into the
1755 // ExperimentControl group.
1756 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1757 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1758 "ExperimentControl");
1759 net::TestCompletionCallback cb;
1760 scoped_ptr<disk_cache::Backend> base_cache;
int rv =
    disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                   net::CACHE_BACKEND_BLOCKFILE,
                                   cache_path_,
                                   0,
                                   true,
                                   cache_thread.message_loop_proxy().get(),
                                   NULL,
                                   &base_cache,
                                   cb.callback());
1771 ASSERT_EQ(net::OK, cb.GetResult(rv));
1772 EXPECT_EQ(0, base_cache->GetEntryCount());
1775 // Tests that the disk cache can restart in the control group preserving
1776 // existing entries.
1777 TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
1778 // Instantiate the SimpleCacheTrial, forcing this run into the
1779 // ExperimentControl group.
1780 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1781 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1782 "ExperimentControl");
1784 base::Thread cache_thread("CacheThread");
1785 ASSERT_TRUE(cache_thread.StartWithOptions(
1786 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1788 scoped_ptr<disk_cache::BackendImpl> cache =
1789 CreateExistingEntryCache(cache_thread, cache_path_);
1790 ASSERT_TRUE(cache.get());
1792 net::TestCompletionCallback cb;
1794 const int kRestartCount = 5;
1795 for (int i=0; i < kRestartCount; ++i) {
1796 cache.reset(new disk_cache::BackendImpl(
1797 cache_path_, cache_thread.message_loop_proxy(), NULL));
1798 int rv = cache->Init(cb.callback());
1799 ASSERT_EQ(net::OK, cb.GetResult(rv));
1800 EXPECT_EQ(1, cache->GetEntryCount());
1802 disk_cache::Entry* entry = NULL;
1803 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1804 EXPECT_EQ(net::OK, cb.GetResult(rv));
1805 EXPECT_TRUE(entry);
1806 entry->Close();
1810 // Tests that the disk cache can leave the control group preserving existing
1811 // entries.
1812 TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
1813 base::Thread cache_thread("CacheThread");
1814 ASSERT_TRUE(cache_thread.StartWithOptions(
1815 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1818 // Instantiate the SimpleCacheTrial, forcing this run into the
1819 // ExperimentControl group.
1820 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1821 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1822 "ExperimentControl");
1824 scoped_ptr<disk_cache::BackendImpl> cache =
1825 CreateExistingEntryCache(cache_thread, cache_path_);
1826 ASSERT_TRUE(cache.get());
1829 // Instantiate the SimpleCacheTrial, forcing this run into the
1830 // ExperimentNo group.
1831 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1832 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1833 net::TestCompletionCallback cb;
1835 const int kRestartCount = 5;
1836 for (int i = 0; i < kRestartCount; ++i) {
1837 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1838 cache_path_, cache_thread.message_loop_proxy(), NULL));
1839 int rv = cache->Init(cb.callback());
1840 ASSERT_EQ(net::OK, cb.GetResult(rv));
1841 EXPECT_EQ(1, cache->GetEntryCount());
1843 disk_cache::Entry* entry = NULL;
1844 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1845 EXPECT_EQ(net::OK, cb.GetResult(rv));
1846 EXPECT_TRUE(entry);
1847 entry->Close();
1851 // Tests that the cache is properly restarted on recovery error.
1852 TEST_F(DiskCacheBackendTest, DeleteOld) {
1853 ASSERT_TRUE(CopyTestCache("wrong_version"));
1854 SetNewEviction();
1855 base::Thread cache_thread("CacheThread");
1856 ASSERT_TRUE(cache_thread.StartWithOptions(
1857 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1859 net::TestCompletionCallback cb;
1860 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
1861 base::FilePath path(cache_path_);
1862 int rv =
1863 disk_cache::CreateCacheBackend(net::DISK_CACHE,
1864 net::CACHE_BACKEND_BLOCKFILE,
1865 path,
1867 true,
1868 cache_thread.message_loop_proxy().get(),
1869 NULL,
1870 &cache_,
1871 cb.callback());
1872 path.clear(); // Make sure path was captured by the previous call.
1873 ASSERT_EQ(net::OK, cb.GetResult(rv));
1874 base::ThreadRestrictions::SetIOAllowed(prev);
1875 cache_.reset();
1876 EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
1879 // We want to be able to deal with messed up entries on disk.
1880 void DiskCacheBackendTest::BackendInvalidEntry2() {
1881 ASSERT_TRUE(CopyTestCache("bad_entry"));
1882 DisableFirstCleanup();
1883 InitCache();
1885 disk_cache::Entry *entry1, *entry2;
1886 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
1887 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
1888 entry1->Close();
1890 // CheckCacheIntegrity will fail at this point.
1891 DisableIntegrityCheck();
1894 TEST_F(DiskCacheBackendTest, InvalidEntry2) {
1895 BackendInvalidEntry2();
1898 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
1899 SetNewEviction();
1900 BackendInvalidEntry2();
1903 // Tests that we don't crash or hang when enumerating this cache.
1904 void DiskCacheBackendTest::BackendInvalidEntry3() {
1905 SetMask(0x1); // 2-entry table.
1906 SetMaxSize(0x3000); // 12 kB.
1907 DisableFirstCleanup();
1908 InitCache();
1910 disk_cache::Entry* entry;
1911 void* iter = NULL;
1912 while (OpenNextEntry(&iter, &entry) == net::OK) {
1913 entry->Close();
1917 TEST_F(DiskCacheBackendTest, InvalidEntry3) {
1918 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
1919 BackendInvalidEntry3();
1922 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
1923 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
1924 SetNewEviction();
1925 BackendInvalidEntry3();
1926 DisableIntegrityCheck();
1929 // Test that we handle a dirty entry on the LRU list, already replaced with
1930 // the same key, and with hash collisions.
1931 TEST_F(DiskCacheBackendTest, InvalidEntry4) {
1932 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
1933 SetMask(0x1); // 2-entry table.
1934 SetMaxSize(0x3000); // 12 kB.
1935 DisableFirstCleanup();
1936 InitCache();
1938 TrimForTest(false);
1941 // Test that we handle a dirty entry on the deleted list, already replaced with
1942 // the same key, and with hash collisions.
1943 TEST_F(DiskCacheBackendTest, InvalidEntry5) {
1944 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
1945 SetNewEviction();
1946 SetMask(0x1); // 2-entry table.
1947 SetMaxSize(0x3000); // 12 kB.
1948 DisableFirstCleanup();
1949 InitCache();
1951 TrimDeletedListForTest(false);
1954 TEST_F(DiskCacheBackendTest, InvalidEntry6) {
1955 ASSERT_TRUE(CopyTestCache("dirty_entry5"));
1956 SetMask(0x1); // 2-entry table.
1957 SetMaxSize(0x3000); // 12 kB.
1958 DisableFirstCleanup();
1959 InitCache();
1961 // There is a dirty entry (but marked as clean) at the end, pointing to a
1962 // deleted entry through the hash collision list. We should not re-insert the
1963 // deleted entry into the index table.
1965 TrimForTest(false);
1966 // The cache should be clean (as detected by CheckCacheIntegrity).
1969 // Tests that we don't hang when there is a loop on the hash collision list.
1970 // The test cache could be a result of bug 69135.
1971 TEST_F(DiskCacheBackendTest, BadNextEntry1) {
1972 ASSERT_TRUE(CopyTestCache("list_loop2"));
1973 SetMask(0x1); // 2-entry table.
1974 SetMaxSize(0x3000); // 12 kB.
1975 DisableFirstCleanup();
1976 InitCache();
1978 // The second entry points at itself, and the first entry is not accessible
1979 // through the index, but it is at the head of the LRU.
1981 disk_cache::Entry* entry;
1982 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
1983 entry->Close();
1985 TrimForTest(false);
1986 TrimForTest(false);
1987 ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
1988 entry->Close();
1989 EXPECT_EQ(1, cache_->GetEntryCount());
1992 // Tests that we don't hang when there is a loop on the hash collision list.
1993 // The test cache could be a result of bug 69135.
1994 TEST_F(DiskCacheBackendTest, BadNextEntry2) {
1995 ASSERT_TRUE(CopyTestCache("list_loop3"));
1996 SetMask(0x1); // 2-entry table.
1997 SetMaxSize(0x3000); // 12 kB.
1998 DisableFirstCleanup();
1999 InitCache();
2001 // There is a wide loop of 5 entries.
2003 disk_cache::Entry* entry;
2004 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
2007 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2008 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2009 DisableFirstCleanup();
2010 SetNewEviction();
2011 InitCache();
2013 // The second entry is dirty, but removing it should not corrupt the list.
2014 disk_cache::Entry* entry;
2015 ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2016 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2018 // This should not delete the cache.
2019 entry->Doom();
2020 FlushQueueForTest();
2021 entry->Close();
2023 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2024 entry->Close();
2027 // Tests handling of corrupt entries by keeping the rankings node around, with
2028 // a fatal failure.
2029 void DiskCacheBackendTest::BackendInvalidEntry7() {
2030 const int kSize = 0x3000; // 12 kB.
2031 SetMaxSize(kSize * 10);
2032 InitCache();
2034 std::string first("some key");
2035 std::string second("something else");
2036 disk_cache::Entry* entry;
2037 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2038 entry->Close();
2039 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2041 // Corrupt this entry.
2042 disk_cache::EntryImpl* entry_impl =
2043 static_cast<disk_cache::EntryImpl*>(entry);
2045 entry_impl->rankings()->Data()->next = 0;
2046 entry_impl->rankings()->Store();
2047 entry->Close();
2048 FlushQueueForTest();
2049 EXPECT_EQ(2, cache_->GetEntryCount());
2051 // This should detect the bad entry.
2052 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2053 EXPECT_EQ(1, cache_->GetEntryCount());
2055 // We should delete the cache. The list still has a corrupt node.
2056 void* iter = NULL;
2057 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2058 FlushQueueForTest();
2059 EXPECT_EQ(0, cache_->GetEntryCount());
2062 TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2063 BackendInvalidEntry7();
2066 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2067 SetNewEviction();
2068 BackendInvalidEntry7();
2071 // Tests handling of corrupt entries by keeping the rankings node around, with
2072 // a non-fatal failure.
2073 void DiskCacheBackendTest::BackendInvalidEntry8() {
2074 const int kSize = 0x3000; // 12 kB
2075 SetMaxSize(kSize * 10);
2076 InitCache();
2078 std::string first("some key");
2079 std::string second("something else");
2080 disk_cache::Entry* entry;
2081 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2082 entry->Close();
2083 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2085 // Corrupt this entry.
2086 disk_cache::EntryImpl* entry_impl =
2087 static_cast<disk_cache::EntryImpl*>(entry);
2089 entry_impl->rankings()->Data()->contents = 0;
2090 entry_impl->rankings()->Store();
2091 entry->Close();
2092 FlushQueueForTest();
2093 EXPECT_EQ(2, cache_->GetEntryCount());
2095 // This should detect the bad entry.
2096 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2097 EXPECT_EQ(1, cache_->GetEntryCount());
2099 // We should not delete the cache.
2100 void* iter = NULL;
2101 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2102 entry->Close();
2103 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2104 EXPECT_EQ(1, cache_->GetEntryCount());
2107 TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2108 BackendInvalidEntry8();
2111 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2112 SetNewEviction();
2113 BackendInvalidEntry8();
2116 // Tests handling of corrupt entries detected by enumerations. Note that these
2117 // tests (xx9 to xx11) are basically just going through slightly different
2118 // codepaths so they are tightly coupled with the code, but that is better than
2119 // not testing error handling code.
2120 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2121 const int kSize = 0x3000; // 12 kB.
2122 SetMaxSize(kSize * 10);
2123 InitCache();
2125 std::string first("some key");
2126 std::string second("something else");
2127 disk_cache::Entry* entry;
2128 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2129 entry->Close();
2130 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2132 // Corrupt this entry.
2133 disk_cache::EntryImpl* entry_impl =
2134 static_cast<disk_cache::EntryImpl*>(entry);
2136 entry_impl->entry()->Data()->state = 0xbad;
2137 entry_impl->entry()->Store();
2138 entry->Close();
2139 FlushQueueForTest();
2140 EXPECT_EQ(2, cache_->GetEntryCount());
2142 if (eviction) {
2143 TrimForTest(false);
2144 EXPECT_EQ(1, cache_->GetEntryCount());
2145 TrimForTest(false);
2146 EXPECT_EQ(1, cache_->GetEntryCount());
2147 } else {
2148 // We should detect the problem through the list, but we should not delete
2149 // the entry, just fail the iteration.
2150 void* iter = NULL;
2151 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2153 // Now a full iteration will work, and return one entry.
2154 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2155 entry->Close();
2156 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2158 // This should detect what's left of the bad entry.
2159 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2160 EXPECT_EQ(2, cache_->GetEntryCount());
2162 DisableIntegrityCheck();
2165 TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2166 BackendInvalidEntry9(false);
2169 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2170 SetNewEviction();
2171 BackendInvalidEntry9(false);
2174 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2175 BackendInvalidEntry9(true);
2178 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2179 SetNewEviction();
2180 BackendInvalidEntry9(true);
2183 // Tests handling of corrupt entries detected by enumerations.
2184 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2185 const int kSize = 0x3000; // 12 kB.
2186 SetMaxSize(kSize * 10);
2187 SetNewEviction();
2188 InitCache();
2190 std::string first("some key");
2191 std::string second("something else");
2192 disk_cache::Entry* entry;
2193 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2194 entry->Close();
2195 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2196 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2197 entry->Close();
2198 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2200 // Corrupt this entry.
2201 disk_cache::EntryImpl* entry_impl =
2202 static_cast<disk_cache::EntryImpl*>(entry);
2204 entry_impl->entry()->Data()->state = 0xbad;
2205 entry_impl->entry()->Store();
2206 entry->Close();
2207 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2208 entry->Close();
2209 EXPECT_EQ(3, cache_->GetEntryCount());
2211 // We have:
2212 // List 0: third -> second (bad).
2213 // List 1: first.
2215 if (eviction) {
2216 // Detection order: second -> first -> third.
2217 TrimForTest(false);
2218 EXPECT_EQ(3, cache_->GetEntryCount());
2219 TrimForTest(false);
2220 EXPECT_EQ(2, cache_->GetEntryCount());
2221 TrimForTest(false);
2222 EXPECT_EQ(1, cache_->GetEntryCount());
2223 } else {
2224 // Detection order: third -> second -> first.
2225 // We should detect the problem through the list, but we should not delete
2226 // the entry.
2227 void* iter = NULL;
2228 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2229 entry->Close();
2230 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2231 EXPECT_EQ(first, entry->GetKey());
2232 entry->Close();
2233 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2235 DisableIntegrityCheck();
2238 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2239 BackendInvalidEntry10(false);
2242 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2243 BackendInvalidEntry10(true);
2246 // Tests handling of corrupt entries detected by enumerations.
2247 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2248 const int kSize = 0x3000; // 12 kB.
2249 SetMaxSize(kSize * 10);
2250 SetNewEviction();
2251 InitCache();
2253 std::string first("some key");
2254 std::string second("something else");
2255 disk_cache::Entry* entry;
2256 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2257 entry->Close();
2258 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2259 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2260 entry->Close();
2261 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2262 entry->Close();
2263 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2264 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2266 // Corrupt this entry.
2267 disk_cache::EntryImpl* entry_impl =
2268 static_cast<disk_cache::EntryImpl*>(entry);
2270 entry_impl->entry()->Data()->state = 0xbad;
2271 entry_impl->entry()->Store();
2272 entry->Close();
2273 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2274 entry->Close();
2275 FlushQueueForTest();
2276 EXPECT_EQ(3, cache_->GetEntryCount());
2278 // We have:
2279 // List 0: third.
2280 // List 1: second (bad) -> first.
2282 if (eviction) {
2283 // Detection order: third -> first -> second.
2284 TrimForTest(false);
2285 EXPECT_EQ(2, cache_->GetEntryCount());
2286 TrimForTest(false);
2287 EXPECT_EQ(1, cache_->GetEntryCount());
2288 TrimForTest(false);
2289 EXPECT_EQ(1, cache_->GetEntryCount());
2290 } else {
2291 // Detection order: third -> second.
2292 // We should detect the problem through the list, but we should not delete
2293 // the entry, just fail the iteration.
2294 void* iter = NULL;
2295 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2296 entry->Close();
2297 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2299 // Now a full iteration will work, and return two entries.
2300 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2301 entry->Close();
2302 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2303 entry->Close();
2304 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2306 DisableIntegrityCheck();
2309 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2310 BackendInvalidEntry11(false);
2313 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2314 BackendInvalidEntry11(true);
2317 // Tests handling of corrupt entries in the middle of a long eviction run.
2318 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2319 const int kSize = 0x3000; // 12 kB
2320 SetMaxSize(kSize * 10);
2321 InitCache();
2323 std::string first("some key");
2324 std::string second("something else");
2325 disk_cache::Entry* entry;
2326 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2327 entry->Close();
2328 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2330 // Corrupt this entry.
2331 disk_cache::EntryImpl* entry_impl =
2332 static_cast<disk_cache::EntryImpl*>(entry);
2334 entry_impl->entry()->Data()->state = 0xbad;
2335 entry_impl->entry()->Store();
2336 entry->Close();
2337 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2338 entry->Close();
2339 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2340 TrimForTest(true);
2341 EXPECT_EQ(1, cache_->GetEntryCount());
2342 entry->Close();
2343 DisableIntegrityCheck();
2346 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2347 BackendTrimInvalidEntry12();
2350 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2351 SetNewEviction();
2352 BackendTrimInvalidEntry12();
2355 // We want to be able to deal with messed up entries on disk.
2356 void DiskCacheBackendTest::BackendInvalidRankings2() {
2357 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2358 DisableFirstCleanup();
2359 InitCache();
2361 disk_cache::Entry *entry1, *entry2;
2362 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2363 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2364 entry2->Close();
2366 // CheckCacheIntegrity will fail at this point.
2367 DisableIntegrityCheck();
2370 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2371 BackendInvalidRankings2();
2374 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2375 SetNewEviction();
2376 BackendInvalidRankings2();
2379 // If the LRU is corrupt, we delete the cache.
2380 void DiskCacheBackendTest::BackendInvalidRankings() {
2381 disk_cache::Entry* entry;
2382 void* iter = NULL;
2383 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2384 entry->Close();
2385 EXPECT_EQ(2, cache_->GetEntryCount());
2387 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2388 FlushQueueForTest(); // Allow the restart to finish.
2389 EXPECT_EQ(0, cache_->GetEntryCount());
2392 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2393 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2394 DisableFirstCleanup();
2395 InitCache();
2396 BackendInvalidRankings();
2399 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2400 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2401 DisableFirstCleanup();
2402 SetNewEviction();
2403 InitCache();
2404 BackendInvalidRankings();
2407 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2408 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2409 DisableFirstCleanup();
2410 InitCache();
2411 SetTestMode(); // Fail cache reinitialization.
2412 BackendInvalidRankings();
2415 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2416 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2417 DisableFirstCleanup();
2418 SetNewEviction();
2419 InitCache();
2420 SetTestMode(); // Fail cache reinitialization.
2421 BackendInvalidRankings();
2424 // If the LRU is corrupt and we have open entries, we disable the cache.
2425 void DiskCacheBackendTest::BackendDisable() {
2426 disk_cache::Entry *entry1, *entry2;
2427 void* iter = NULL;
2428 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2430 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2431 EXPECT_EQ(0, cache_->GetEntryCount());
2432 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2434 entry1->Close();
2435 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2436 FlushQueueForTest(); // This one actually allows that task to complete.
2438 EXPECT_EQ(0, cache_->GetEntryCount());
2441 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2442 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2443 DisableFirstCleanup();
2444 InitCache();
2445 BackendDisable();
2448 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2449 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2450 DisableFirstCleanup();
2451 SetNewEviction();
2452 InitCache();
2453 BackendDisable();
2456 TEST_F(DiskCacheBackendTest, DisableFailure) {
2457 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2458 DisableFirstCleanup();
2459 InitCache();
2460 SetTestMode(); // Fail cache reinitialization.
2461 BackendDisable();
2464 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2465 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2466 DisableFirstCleanup();
2467 SetNewEviction();
2468 InitCache();
2469 SetTestMode(); // Fail cache reinitialization.
2470 BackendDisable();
2473 // This is another type of corruption on the LRU; disable the cache.
2474 void DiskCacheBackendTest::BackendDisable2() {
2475 EXPECT_EQ(8, cache_->GetEntryCount());
2477 disk_cache::Entry* entry;
2478 void* iter = NULL;
2479 int count = 0;
2480 while (OpenNextEntry(&iter, &entry) == net::OK) {
2481 ASSERT_TRUE(NULL != entry);
2482 entry->Close();
2483 count++;
2484 ASSERT_LT(count, 9);
2487 FlushQueueForTest();
2488 EXPECT_EQ(0, cache_->GetEntryCount());
2491 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2492 ASSERT_TRUE(CopyTestCache("list_loop"));
2493 DisableFirstCleanup();
2494 InitCache();
2495 BackendDisable2();
2498 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2499 ASSERT_TRUE(CopyTestCache("list_loop"));
2500 DisableFirstCleanup();
2501 SetNewEviction();
2502 InitCache();
2503 BackendDisable2();
2506 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2507 ASSERT_TRUE(CopyTestCache("list_loop"));
2508 DisableFirstCleanup();
2509 InitCache();
2510 SetTestMode(); // Fail cache reinitialization.
2511 BackendDisable2();
2514 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2515 ASSERT_TRUE(CopyTestCache("list_loop"));
2516 DisableFirstCleanup();
2517 SetNewEviction();
2518 InitCache();
2519 SetTestMode(); // Fail cache reinitialization.
2520 BackendDisable2();
2523 // If the index size changes when we disable the cache, we should not crash.
2524 void DiskCacheBackendTest::BackendDisable3() {
2525 disk_cache::Entry *entry1, *entry2;
2526 void* iter = NULL;
2527 EXPECT_EQ(2, cache_->GetEntryCount());
2528 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2529 entry1->Close();
2531 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2532 FlushQueueForTest();
2534 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2535 entry2->Close();
2537 EXPECT_EQ(1, cache_->GetEntryCount());
2540 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2541 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2542 DisableFirstCleanup();
2543 SetMaxSize(20 * 1024 * 1024);
2544 InitCache();
2545 BackendDisable3();
2548 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2549 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2550 DisableFirstCleanup();
2551 SetMaxSize(20 * 1024 * 1024);
2552 SetNewEviction();
2553 InitCache();
2554 BackendDisable3();
2557 // If we disable the cache, already open entries should work as far as possible.
2558 void DiskCacheBackendTest::BackendDisable4() {
2559 disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2560 void* iter = NULL;
2561 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2563 char key2[2000];
2564 char key3[20000];
2565 CacheTestFillBuffer(key2, sizeof(key2), true);
2566 CacheTestFillBuffer(key3, sizeof(key3), true);
2567 key2[sizeof(key2) - 1] = '\0';
2568 key3[sizeof(key3) - 1] = '\0';
2569 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2570 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2572 const int kBufSize = 20000;
2573 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2574 memset(buf->data(), 0, kBufSize);
2575 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2576 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2578 // This line should disable the cache but not delete it.
2579 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
2580 EXPECT_EQ(0, cache_->GetEntryCount());
2582 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
2584 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2585 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2586 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2588 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2589 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2590 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2592 std::string key = entry2->GetKey();
2593 EXPECT_EQ(sizeof(key2) - 1, key.size());
2594 key = entry3->GetKey();
2595 EXPECT_EQ(sizeof(key3) - 1, key.size());
2597 entry1->Close();
2598 entry2->Close();
2599 entry3->Close();
2600 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2601 FlushQueueForTest(); // This one actually allows that task to complete.
2603 EXPECT_EQ(0, cache_->GetEntryCount());
2606 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2607 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2608 DisableFirstCleanup();
2609 InitCache();
2610 BackendDisable4();
2613 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2614 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2615 DisableFirstCleanup();
2616 SetNewEviction();
2617 InitCache();
2618 BackendDisable4();
2621 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2622 MessageLoopHelper helper;
2624 ASSERT_TRUE(CleanupCacheDir());
2625 scoped_ptr<disk_cache::BackendImpl> cache;
2626 cache.reset(new disk_cache::BackendImpl(
2627 cache_path_, base::MessageLoopProxy::current().get(), NULL));
2628 ASSERT_TRUE(NULL != cache.get());
2629 cache->SetUnitTestMode();
2630 ASSERT_EQ(net::OK, cache->SyncInit());
2632 // Wait for a callback that never comes... about 2 secs :). The message loop
2633 // has to run to allow invocation of the usage timer.
2634 helper.WaitUntilCacheIoFinished(1);
2637 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2638 InitCache();
2639 disk_cache::Entry* entry;
2640 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2641 entry->Close();
2642 FlushQueueForTest();
2644 disk_cache::StatsItems stats;
2645 cache_->GetStats(&stats);
2646 EXPECT_FALSE(stats.empty());
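// Stats are exposed as (name, value) string pairs; a single create hit is
// expected to show up with the value "0x1", which is what the pair below
// matches.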
2648 disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2649 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2651 cache_.reset();
2653 // Now open the cache and verify that the stats are still there.
2654 DisableFirstCleanup();
2655 InitCache();
2656 EXPECT_EQ(1, cache_->GetEntryCount());
2658 stats.clear();
2659 cache_->GetStats(&stats);
2660 EXPECT_FALSE(stats.empty());
2662 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2665 void DiskCacheBackendTest::BackendDoomAll() {
2666 InitCache();
2668 disk_cache::Entry *entry1, *entry2;
2669 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2670 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2671 entry1->Close();
2672 entry2->Close();
2674 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2675 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2677 ASSERT_EQ(4, cache_->GetEntryCount());
2678 EXPECT_EQ(net::OK, DoomAllEntries());
2679 ASSERT_EQ(0, cache_->GetEntryCount());
2681 // We should stop posting tasks at some point (if we post any).
2682 base::MessageLoop::current()->RunUntilIdle();
2684 disk_cache::Entry *entry3, *entry4;
2685 EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2686 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2687 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2689 EXPECT_EQ(net::OK, DoomAllEntries());
2690 ASSERT_EQ(0, cache_->GetEntryCount());
2692 entry1->Close();
2693 entry2->Close();
2694 entry3->Doom(); // The entry should be already doomed, but this must work.
2695 entry3->Close();
2696 entry4->Close();
2698 // Now try with all references released.
2699 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2700 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2701 entry1->Close();
2702 entry2->Close();
2704 ASSERT_EQ(2, cache_->GetEntryCount());
2705 EXPECT_EQ(net::OK, DoomAllEntries());
2706 ASSERT_EQ(0, cache_->GetEntryCount());
2708 EXPECT_EQ(net::OK, DoomAllEntries());
2711 TEST_F(DiskCacheBackendTest, DoomAll) {
2712 BackendDoomAll();
2715 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2716 SetNewEviction();
2717 BackendDoomAll();
2720 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2721 SetMemoryOnlyMode();
2722 BackendDoomAll();
2725 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2726 SetCacheType(net::APP_CACHE);
2727 BackendDoomAll();
2730 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2731 SetCacheType(net::SHADER_CACHE);
2732 BackendDoomAll();
2735 // If the index size changes when we doom the cache, we should not crash.
2736 void DiskCacheBackendTest::BackendDoomAll2() {
2737 EXPECT_EQ(2, cache_->GetEntryCount());
2738 EXPECT_EQ(net::OK, DoomAllEntries());
2740 disk_cache::Entry* entry;
2741 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2742 entry->Close();
2744 EXPECT_EQ(1, cache_->GetEntryCount());
2747 TEST_F(DiskCacheBackendTest, DoomAll2) {
2748 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2749 DisableFirstCleanup();
2750 SetMaxSize(20 * 1024 * 1024);
2751 InitCache();
2752 BackendDoomAll2();
2755 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2756 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2757 DisableFirstCleanup();
2758 SetMaxSize(20 * 1024 * 1024);
2759 SetNewEviction();
2760 InitCache();
2761 BackendDoomAll2();
2764 // We should be able to create the same entry on multiple simultaneous instances
2765 // of the cache.
2766 TEST_F(DiskCacheTest, MultipleInstances) {
2767 base::ScopedTempDir store1, store2;
2768 ASSERT_TRUE(store1.CreateUniqueTempDir());
2769 ASSERT_TRUE(store2.CreateUniqueTempDir());
2771 base::Thread cache_thread("CacheThread");
2772 ASSERT_TRUE(cache_thread.StartWithOptions(
2773 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2774 net::TestCompletionCallback cb;
2776 const int kNumberOfCaches = 2;
2777 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2779 int rv =
2780 disk_cache::CreateCacheBackend(net::DISK_CACHE,
2781 net::CACHE_BACKEND_DEFAULT,
2782 store1.path(),
2784 false,
2785 cache_thread.message_loop_proxy().get(),
2786 NULL,
2787 &cache[0],
2788 cb.callback());
2789 ASSERT_EQ(net::OK, cb.GetResult(rv));
2790 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2791 net::CACHE_BACKEND_DEFAULT,
2792 store2.path(),
2794 false,
2795 cache_thread.message_loop_proxy().get(),
2796 NULL,
2797 &cache[1],
2798 cb.callback());
2799 ASSERT_EQ(net::OK, cb.GetResult(rv));
2801 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2803 std::string key("the first key");
2804 disk_cache::Entry* entry;
2805 for (int i = 0; i < kNumberOfCaches; i++) {
2806 rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2807 ASSERT_EQ(net::OK, cb.GetResult(rv));
2808 entry->Close();
2812 // Test the six regions of the curve that determines the max cache size.
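// A rough worked example, inferred from the expectations below (assuming the
// 80 MB default): with ~50 MB of disk available we are in region 1, so the
// preferred size is 80% of that (~40 MB); with ~1 GB available, region 3
// gives 10% (~100 MB); with ~100 GB available, region 5 gives 1% (~1 GB).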
2813 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2814 const int kDefaultSize = 80 * 1024 * 1024;
2815 int64 large_size = kDefaultSize;
2816 int64 largest_size = kint32max;
2818 // Region 1: expected = available * 0.8
2819 EXPECT_EQ((kDefaultSize - 1) * 8 / 10,
2820 disk_cache::PreferedCacheSize(large_size - 1));
2821 EXPECT_EQ(kDefaultSize * 8 / 10,
2822 disk_cache::PreferedCacheSize(large_size));
2823 EXPECT_EQ(kDefaultSize - 1,
2824 disk_cache::PreferedCacheSize(large_size * 10 / 8 - 1));
2826 // Region 2: expected = default_size
2827 EXPECT_EQ(kDefaultSize,
2828 disk_cache::PreferedCacheSize(large_size * 10 / 8));
2829 EXPECT_EQ(kDefaultSize,
2830 disk_cache::PreferedCacheSize(large_size * 10 - 1));
2832 // Region 3: expected = available * 0.1
2833 EXPECT_EQ(kDefaultSize,
2834 disk_cache::PreferedCacheSize(large_size * 10));
2835 EXPECT_EQ((kDefaultSize * 25 - 1) / 10,
2836 disk_cache::PreferedCacheSize(large_size * 25 - 1));
2838 // Region 4: expected = default_size * 2.5
2839 EXPECT_EQ(kDefaultSize * 25 / 10,
2840 disk_cache::PreferedCacheSize(large_size * 25));
2841 EXPECT_EQ(kDefaultSize * 25 / 10,
2842 disk_cache::PreferedCacheSize(large_size * 100 - 1));
2843 EXPECT_EQ(kDefaultSize * 25 / 10,
2844 disk_cache::PreferedCacheSize(large_size * 100));
2845 EXPECT_EQ(kDefaultSize * 25 / 10,
2846 disk_cache::PreferedCacheSize(large_size * 250 - 1));
2848 // Region 5: expected = available * 0.01
2849 EXPECT_EQ(kDefaultSize * 25 / 10,
2850 disk_cache::PreferedCacheSize(large_size * 250));
2851 EXPECT_EQ(kint32max - 1,
2852 disk_cache::PreferedCacheSize(largest_size * 100 - 1));
2854 // Region 6: expected = kint32max
2855 EXPECT_EQ(kint32max,
2856 disk_cache::PreferedCacheSize(largest_size * 100));
2857 EXPECT_EQ(kint32max,
2858 disk_cache::PreferedCacheSize(largest_size * 10000));
2861 // Tests that we can "migrate" a running instance from one experiment group to
2862 // another.
2863 TEST_F(DiskCacheBackendTest, Histograms) {
2864 InitCache();
2865 disk_cache::BackendImpl* backend_ = cache_impl_; // Needed by the macro.
2867 for (int i = 1; i < 3; i++) {
2868 CACHE_UMA(HOURS, "FillupTime", i, 28);
2872 // Make sure that we keep the total memory used by the internal buffers under
2873 // control.
2874 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
2875 InitCache();
2876 std::string key("the first key");
2877 disk_cache::Entry* entry;
2878 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2880 const int kSize = 200;
2881 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
2882 CacheTestFillBuffer(buffer->data(), kSize, true);
2884 for (int i = 0; i < 10; i++) {
2885 SCOPED_TRACE(i);
2886 // Allocate 2MB for this entry.
2887 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
2888 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
2889 EXPECT_EQ(kSize,
2890 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
2891 EXPECT_EQ(kSize,
2892 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
2894 // Delete one of the buffers and truncate the other.
2895 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
2896 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
2898 // Delete the second buffer, writing 10 bytes to disk.
2899 entry->Close();
2900 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
2903 entry->Close();
2904 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
2907 // This test assumes at least 150MB of system memory.
2908 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
2909 InitCache();
2911 const int kOneMB = 1024 * 1024;
2912 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
2913 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
2915 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
2916 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
2918 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
2919 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
2921 cache_impl_->BufferDeleted(kOneMB);
2922 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
2924 // Check the upper limit.
2925 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
2927 for (int i = 0; i < 30; i++)
2928 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
2930 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
2933 // Tests that sharing of external files works and we are able to delete the
2934 // files when we need to.
2935 TEST_F(DiskCacheBackendTest, FileSharing) {
2936 InitCache();
2938 disk_cache::Addr address(0x80000001);
2939 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
2940 base::FilePath name = cache_impl_->GetFileName(address);
2942 scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
2943 file->Init(name);
2945 #if defined(OS_WIN)
2946 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
2947 DWORD access = GENERIC_READ | GENERIC_WRITE;
2948 base::win::ScopedHandle file2(CreateFile(
2949 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
2950 EXPECT_FALSE(file2.IsValid());
2952 sharing |= FILE_SHARE_DELETE;
2953 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
2954 OPEN_EXISTING, 0, NULL));
2955 EXPECT_TRUE(file2.IsValid());
2956 #endif
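// On POSIX platforms an open file can be unlinked and remains usable through
// the existing descriptor; on Windows this requires FILE_SHARE_DELETE, as
// exercised above. Either way, the cache should still be able to use |file|
// after the deletion below.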
2958 EXPECT_TRUE(base::DeleteFile(name, false));
2960 // We should be able to use the file.
2961 const int kSize = 200;
2962 char buffer1[kSize];
2963 char buffer2[kSize];
2964 memset(buffer1, 't', kSize);
2965 memset(buffer2, 0, kSize);
2966 EXPECT_TRUE(file->Write(buffer1, kSize, 0));
2967 EXPECT_TRUE(file->Read(buffer2, kSize, 0));
2968 EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
2970 EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
2973 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
2974 InitCache();
2976 disk_cache::Entry* entry;
2978 for (int i = 0; i < 2; ++i) {
2979 std::string key = base::StringPrintf("key%d", i);
2980 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2981 entry->Close();
2984 // Ping the oldest entry.
2985 cache_->OnExternalCacheHit("key0");
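// The external hit should move "key0" to the most-recently-used position, so
// the trim below evicts the other entry instead.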
2987 TrimForTest(false);
2989 // Make sure the older key remains.
2990 EXPECT_EQ(1, cache_->GetEntryCount());
2991 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
2992 entry->Close();
2995 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
2996 SetCacheType(net::SHADER_CACHE);
2997 InitCache();
2999 disk_cache::Entry* entry;
3001 for (int i = 0; i < 2; ++i) {
3002 std::string key = base::StringPrintf("key%d", i);
3003 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3004 entry->Close();
3007 // Ping the oldest entry.
3008 cache_->OnExternalCacheHit("key0");
3010 TrimForTest(false);
3012 // Make sure the older key remains.
3013 EXPECT_EQ(1, cache_->GetEntryCount());
3014 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3015 entry->Close();
3018 void DiskCacheBackendTest::TracingBackendBasics() {
3019 InitCache();
3020 cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass()));
3021 cache_impl_ = NULL;
3022 EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
3023 if (!simple_cache_mode_) {
3024 EXPECT_EQ(0, cache_->GetEntryCount());
3027 net::TestCompletionCallback cb;
3028 disk_cache::Entry* entry = NULL;
3029 EXPECT_NE(net::OK, OpenEntry("key", &entry));
3030 EXPECT_TRUE(NULL == entry);
3032 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3033 EXPECT_TRUE(NULL != entry);
3035 disk_cache::Entry* same_entry = NULL;
3036 ASSERT_EQ(net::OK, OpenEntry("key", &same_entry));
3037 EXPECT_TRUE(NULL != same_entry);
3039 if (!simple_cache_mode_) {
3040 EXPECT_EQ(1, cache_->GetEntryCount());
3042 entry->Close();
3043 entry = NULL;
3044 same_entry->Close();
3045 same_entry = NULL;
3048 TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
3049 TracingBackendBasics();
3052 // The simple cache backend isn't intended to work on Windows, which has very
3053 // different file system guarantees from the POSIX platforms it targets.
3054 #if !defined(OS_WIN)
3056 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3057 SetSimpleCacheMode();
3058 BackendBasics();
3061 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3062 SetCacheType(net::APP_CACHE);
3063 SetSimpleCacheMode();
3064 BackendBasics();
3067 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3068 SetSimpleCacheMode();
3069 BackendKeying();
3072 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3073 SetSimpleCacheMode();
3074 SetCacheType(net::APP_CACHE);
3075 BackendKeying();
3078 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3079 SetSimpleCacheMode();
3080 BackendSetSize();
3083 // MacOS has a default open file limit of 256 files, which is incompatible with
3084 // this simple cache test.
3085 #if defined(OS_MACOSX)
3086 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3087 #else
3088 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3089 #endif
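// With this macro, e.g. SIMPLE_MAYBE_MACOS(SimpleCacheLoad) becomes
// DISABLED_SimpleCacheLoad on Mac OS X; gtest skips DISABLED_ tests unless
// --gtest_also_run_disabled_tests is passed.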
3091 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3092 SetMaxSize(0x100000);
3093 SetSimpleCacheMode();
3094 BackendLoad();
3097 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3098 SetCacheType(net::APP_CACHE);
3099 SetSimpleCacheMode();
3100 SetMaxSize(0x100000);
3101 BackendLoad();
3104 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3105 SetSimpleCacheMode();
3106 BackendDoomRecent();
3109 TEST_F(DiskCacheBackendTest, SimpleDoomBetween) {
3110 SetSimpleCacheMode();
3111 BackendDoomBetween();
3114 // See http://crbug.com/237450.
3115 TEST_F(DiskCacheBackendTest, FLAKY_SimpleCacheDoomAll) {
3116 SetSimpleCacheMode();
3117 BackendDoomAll();
3120 TEST_F(DiskCacheBackendTest, FLAKY_SimpleCacheAppCacheOnlyDoomAll) {
3121 SetCacheType(net::APP_CACHE);
3122 SetSimpleCacheMode();
3123 BackendDoomAll();
3126 TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
3127 SetSimpleCacheMode();
3128 TracingBackendBasics();
3129 // TODO(pasko): implement integrity checking on the Simple Backend.
3130 DisableIntegrityCheck();
3133 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3134 SetSimpleCacheMode();
3135 InitCache();
3137 const char* key = "the first key";
3138 disk_cache::Entry* entry = NULL;
3140 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3141 ASSERT_TRUE(entry != NULL);
3142 entry->Close();
3143 entry = NULL;
3145 // To make sure the file creation completed, we call open again so that we
3146 // block until the files have actually been created.
3147 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3148 ASSERT_TRUE(entry != NULL);
3149 entry->Close();
3150 entry = NULL;
3152 // Delete one of the files in the entry.
3153 base::FilePath to_delete_file = cache_path_.AppendASCII(
3154 disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 0));
3155 EXPECT_TRUE(base::PathExists(to_delete_file));
3156 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3158 // Failing to open the entry should delete the rest of these files.
3159 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3161 // Confirm the rest of the files are gone.
3162 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3163 base::FilePath
3164 should_be_gone_file(cache_path_.AppendASCII(
3165 disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, i)));
3166 EXPECT_FALSE(base::PathExists(should_be_gone_file));
3170 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3171 SetSimpleCacheMode();
3172 InitCache();
3174 const char* key = "the first key";
3175 disk_cache::Entry* entry = NULL;
3177 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3178 disk_cache::Entry* null = NULL;
3179 ASSERT_NE(null, entry);
3180 entry->Close();
3181 entry = NULL;
3183 // To make sure the file creation completed, we call open again so that we
3184 // block until the files have actually been created.
3185 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3186 ASSERT_NE(null, entry);
3187 entry->Close();
3188 entry = NULL;
3190 // Write an invalid header on stream 1.
3191 base::FilePath entry_file1_path = cache_path_.AppendASCII(
3192 disk_cache::simple_util::GetFilenameFromKeyAndIndex(key, 1));
3194 disk_cache::SimpleFileHeader header;
3195 header.initial_magic_number = GG_UINT64_C(0xbadf00d);
3196 EXPECT_EQ(
3197 implicit_cast<int>(sizeof(header)),
3198 file_util::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3199 sizeof(header)));
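// With the magic number clobbered, the following open should fail the
// entry's header check rather than return the entry.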
3200 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3203 // Tests that the Simple Cache Backend fails to initialize with a non-matching
3204 // file structure on disk.
3205 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3206 // Create a cache structure with the |BackendImpl|.
3207 InitCache();
3208 disk_cache::Entry* entry;
3209 const int kSize = 50;
3210 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3211 CacheTestFillBuffer(buffer->data(), kSize, false);
3212 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3213 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3214 entry->Close();
3215 cache_.reset();
3217 // Check that the |SimpleBackendImpl| does not favor this structure.
3218 base::Thread cache_thread("CacheThread");
3219 ASSERT_TRUE(cache_thread.StartWithOptions(
3220 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3221 disk_cache::SimpleBackendImpl* simple_cache =
3222 new disk_cache::SimpleBackendImpl(cache_path_,
3224 net::DISK_CACHE,
3225 cache_thread.message_loop_proxy().get(),
3226 NULL);
3227 net::TestCompletionCallback cb;
3228 int rv = simple_cache->Init(cb.callback());
3229 EXPECT_NE(net::OK, cb.GetResult(rv));
3230 delete simple_cache;
3231 DisableIntegrityCheck();
3234 // Tests that the |BackendImpl| refuses to initialize on top of the files
3235 // generated by the Simple Cache Backend.
3236 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3237 // Create a cache structure with the |SimpleBackendImpl|.
3238 SetSimpleCacheMode();
3239 InitCache();
3240 disk_cache::Entry* entry;
3241 const int kSize = 50;
3242 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3243 CacheTestFillBuffer(buffer->data(), kSize, false);
3244 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3245 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3246 entry->Close();
3247 cache_.reset();
3249 // Check that the |BackendImpl| does not favor this structure.
3250 base::Thread cache_thread("CacheThread");
3251 ASSERT_TRUE(cache_thread.StartWithOptions(
3252 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3253 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3254 cache_path_, base::MessageLoopProxy::current().get(), NULL);
3255 cache->SetUnitTestMode();
3256 net::TestCompletionCallback cb;
3257 int rv = cache->Init(cb.callback());
3258 EXPECT_NE(net::OK, cb.GetResult(rv));
3259 delete cache;
3260 DisableIntegrityCheck();
3263 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3264 SetSimpleCacheMode();
3265 BackendFixEnumerators();
3268 // Creates entries based on random keys. Stores these keys in |key_pool|.
3269 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
3270 std::set<std::string>* key_pool) {
3271 const int kNumEntries = 10;
3273 for (int i = 0; i < kNumEntries; ++i) {
3274 std::string key = GenerateKey(true);
3275 disk_cache::Entry* entry;
3276 if (CreateEntry(key, &entry) != net::OK)
3277 return false;
3278 key_pool->insert(key);
3279 entry->Close();
3281 return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
3284 // Iterates over the backend, checking that the key of each opened entry is in
3285 // |keys_to_match| and erasing it. If |max_to_open| is positive, at most that
3286 // many entries are opened; otherwise, iteration continues until OpenNextEntry
3287 // stops returning net::OK.
3288 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
3289 int max_to_open,
3290 void** iter,
3291 std::set<std::string>* keys_to_match,
3292 size_t* count) {
3293 disk_cache::Entry* entry;
3295 while (OpenNextEntry(iter, &entry) == net::OK) {
3296 if (!entry)
3297 return false;
3298 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
3299 entry->Close();
3300 ++(*count);
3301 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
3302 break;
3305 return true;
3308 // Tests basic functionality of the SimpleBackend implementation of the
3309 // enumeration API.
3310 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3311 SetSimpleCacheMode();
3312 InitCache();
3313 std::set<std::string> key_pool;
3314 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3316 // Check that enumeration returns all entries.
3317 std::set<std::string> keys_to_match(key_pool);
3318 void* iter = NULL;
3319 size_t count = 0;
3320 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3321 cache_->EndEnumeration(&iter);
3322 EXPECT_EQ(key_pool.size(), count);
3323 EXPECT_TRUE(keys_to_match.empty());
3325 // Check that opening entries does not affect enumeration.
3326 keys_to_match = key_pool;
3327 iter = NULL;
3328 count = 0;
3329 disk_cache::Entry* entry_opened_before;
3330 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3331 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3332 &iter,
3333 &keys_to_match,
3334 &count));
3336 disk_cache::Entry* entry_opened_middle;
3337 ASSERT_EQ(net::OK,
3338 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3339 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3340 cache_->EndEnumeration(&iter);
3341 entry_opened_before->Close();
3342 entry_opened_middle->Close();
3344 EXPECT_EQ(key_pool.size(), count);
3345 EXPECT_TRUE(keys_to_match.empty());
3348 // Tests that the enumerations are not affected by dooming an entry in the
3349 // middle.
3350 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3351 SetSimpleCacheMode();
3352 InitCache();
3353 std::set<std::string> key_pool;
3354 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3356 // Check that enumeration returns all entries but the doomed one.
3357 std::set<std::string> keys_to_match(key_pool);
3358 void* iter = NULL;
3359 size_t count = 0;
3360 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3361 &iter,
3362 &keys_to_match,
3363 &count));
3365 std::string key_to_delete = *(keys_to_match.begin());
3366 DoomEntry(key_to_delete);
3367 keys_to_match.erase(key_to_delete);
3368 key_pool.erase(key_to_delete);
3369 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3370 cache_->EndEnumeration(&iter);
3372 EXPECT_EQ(key_pool.size(), count);
3373 EXPECT_TRUE(keys_to_match.empty());
3376 // Tests that enumerations are not affected by corrupt files.
3377 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3378 SetSimpleCacheMode();
3379 InitCache();
3380 std::set<std::string> key_pool;
3381 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3383 // Create a corrupt entry. The write/read sequence ensures that the entry will
3384 // have been created before corrupting the platform files, in the case of
3385 // optimistic operations.
3386 const std::string key = "the key";
3387 disk_cache::Entry* corrupted_entry;
3389 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3390 ASSERT_TRUE(corrupted_entry);
3391 const int kSize = 50;
3392 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3393 CacheTestFillBuffer(buffer->data(), kSize, false);
3394 ASSERT_EQ(kSize,
3395 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3396 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3397 corrupted_entry->Close();
3399 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3400 key, cache_path_));
3401 EXPECT_EQ(key_pool.size() + 1,
3402 implicit_cast<size_t>(cache_->GetEntryCount()));
3404 // Check that enumeration returns all entries but the corrupt one.
3405 std::set<std::string> keys_to_match(key_pool);
3406 void* iter = NULL;
3407 size_t count = 0;
3408 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3409 cache_->EndEnumeration(&iter);
3411 EXPECT_EQ(key_pool.size(), count);
3412 EXPECT_TRUE(keys_to_match.empty());
3415 #endif // !defined(OS_WIN)