net/disk_cache/backend_unittest.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/file_util.h"
7 #include "base/metrics/field_trial.h"
8 #include "base/port.h"
9 #include "base/strings/string_util.h"
10 #include "base/strings/stringprintf.h"
11 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
12 #include "base/threading/platform_thread.h"
13 #include "base/threading/thread_restrictions.h"
14 #include "net/base/cache_type.h"
15 #include "net/base/io_buffer.h"
16 #include "net/base/net_errors.h"
17 #include "net/base/test_completion_callback.h"
18 #include "net/disk_cache/backend_impl.h"
19 #include "net/disk_cache/cache_util.h"
20 #include "net/disk_cache/disk_cache_test_base.h"
21 #include "net/disk_cache/disk_cache_test_util.h"
22 #include "net/disk_cache/entry_impl.h"
23 #include "net/disk_cache/experiments.h"
24 #include "net/disk_cache/histogram_macros.h"
25 #include "net/disk_cache/mapped_file.h"
26 #include "net/disk_cache/mem_backend_impl.h"
27 #include "net/disk_cache/simple/simple_backend_impl.h"
28 #include "net/disk_cache/simple/simple_entry_format.h"
29 #include "net/disk_cache/simple/simple_test_util.h"
30 #include "net/disk_cache/simple/simple_util.h"
31 #include "net/disk_cache/tracing_cache_backend.h"
32 #include "testing/gtest/include/gtest/gtest.h"
34 #if defined(OS_WIN)
35 #include "base/win/scoped_handle.h"
36 #endif
38 using base::Time;
40 namespace {
42 const char kExistingEntryKey[] = "existing entry key";
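// Helper that creates a blockfile backend in |cache_path| (driven by
// |cache_thread|), adds a single entry keyed by kExistingEntryKey, and
// returns the initialized cache, or NULL on failure.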
44 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
45 const base::Thread& cache_thread,
46 base::FilePath& cache_path) {
47 net::TestCompletionCallback cb;
49 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
50 cache_path, cache_thread.message_loop_proxy(), NULL));
51 int rv = cache->Init(cb.callback());
52 if (cb.GetResult(rv) != net::OK)
53 return scoped_ptr<disk_cache::BackendImpl>();
55 disk_cache::Entry* entry = NULL;
56 rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
57 if (cb.GetResult(rv) != net::OK)
58 return scoped_ptr<disk_cache::BackendImpl>();
59 entry->Close();
61 return cache.Pass();
64 } // namespace
66 // Tests that can run with different types of caches.
67 class DiskCacheBackendTest : public DiskCacheTestWithCache {
68 protected:
69 // Some utility methods:
71 // Perform IO operations on the cache until there is pending IO.
72 int GeneratePendingIO(net::TestCompletionCallback* cb);
74 // Adds 5 sparse entries. |doomed_start| and |doomed_end|, if not NULL, will
75 // be filled with times used by DoomEntriesSince and DoomEntriesBetween.
76 // There are 4 entries after doomed_start and 2 after doomed_end.
77 void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
79 bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
80 bool EnumerateAndMatchKeys(int max_to_open,
81 void** iter,
82 std::set<std::string>* keys_to_match,
83 size_t* count);
85 // Actual tests:
86 void BackendBasics();
87 void BackendKeying();
88 void BackendShutdownWithPendingFileIO(bool fast);
89 void BackendShutdownWithPendingIO(bool fast);
90 void BackendShutdownWithPendingCreate(bool fast);
91 void BackendSetSize();
92 void BackendLoad();
93 void BackendChain();
94 void BackendValidEntry();
95 void BackendInvalidEntry();
96 void BackendInvalidEntryRead();
97 void BackendInvalidEntryWithLoad();
98 void BackendTrimInvalidEntry();
99 void BackendTrimInvalidEntry2();
100 void BackendEnumerations();
101 void BackendEnumerations2();
102 void BackendInvalidEntryEnumeration();
103 void BackendFixEnumerators();
104 void BackendDoomRecent();
105 void BackendDoomBetween();
106 void BackendTransaction(const std::string& name, int num_entries, bool load);
107 void BackendRecoverInsert();
108 void BackendRecoverRemove();
109 void BackendRecoverWithEviction();
110 void BackendInvalidEntry2();
111 void BackendInvalidEntry3();
112 void BackendInvalidEntry7();
113 void BackendInvalidEntry8();
114 void BackendInvalidEntry9(bool eviction);
115 void BackendInvalidEntry10(bool eviction);
116 void BackendInvalidEntry11(bool eviction);
117 void BackendTrimInvalidEntry12();
118 void BackendDoomAll();
119 void BackendDoomAll2();
120 void BackendInvalidRankings();
121 void BackendInvalidRankings2();
122 void BackendDisable();
123 void BackendDisable2();
124 void BackendDisable3();
125 void BackendDisable4();
126 void TracingBackendBasics();
129 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
130 if (!use_current_thread_) {
131 ADD_FAILURE();
132 return net::ERR_FAILED;
135 disk_cache::Entry* entry;
136 int rv = cache_->CreateEntry("some key", &entry, cb->callback());
137 if (cb->GetResult(rv) != net::OK)
138 return net::ERR_CACHE_CREATE_FAILURE;
140 const int kSize = 25000;
141 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
142 CacheTestFillBuffer(buffer->data(), kSize, false);
144 for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
145 // We are using the current thread as the cache thread so that this method
146 // can be called directly, making sure that it is the OS (rather than a
147 // thread switch on our side) that reports the IO as pending.
148 if (!simple_cache_mode_) {
149 rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
150 0, i, buffer.get(), kSize, cb->callback(), false);
151 } else {
152 rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
155 if (rv == net::ERR_IO_PENDING)
156 break;
157 if (rv != kSize)
158 rv = net::ERR_FAILED;
161 // Don't call Close() to avoid going through the queue or we'll deadlock
162 // waiting for the operation to finish.
163 if (!simple_cache_mode_)
164 static_cast<disk_cache::EntryImpl*>(entry)->Release();
165 else
166 entry->Close();
168 return rv;
171 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
172 base::Time* doomed_end) {
173 InitCache();
175 const int kSize = 50;
176 // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
177 const int kOffset = 10 + 1024 * 1024;
179 disk_cache::Entry* entry0 = NULL;
180 disk_cache::Entry* entry1 = NULL;
181 disk_cache::Entry* entry2 = NULL;
183 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
184 CacheTestFillBuffer(buffer->data(), kSize, false);
186 ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
187 ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
188 ASSERT_EQ(kSize,
189 WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
190 entry0->Close();
192 FlushQueueForTest();
193 AddDelay();
194 if (doomed_start)
195 *doomed_start = base::Time::Now();
197 // Order in rankings list:
198 // first_part1, first_part2, second_part1, second_part2
199 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
200 ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
201 ASSERT_EQ(kSize,
202 WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
203 entry1->Close();
205 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
206 ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
207 ASSERT_EQ(kSize,
208 WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
209 entry2->Close();
211 FlushQueueForTest();
212 AddDelay();
213 if (doomed_end)
214 *doomed_end = base::Time::Now();
216 // Order in rankings list:
217 // third_part1, fourth_part1, third_part2, fourth_part2
218 disk_cache::Entry* entry3 = NULL;
219 disk_cache::Entry* entry4 = NULL;
220 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
221 ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
222 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
223 ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
224 ASSERT_EQ(kSize,
225 WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
226 ASSERT_EQ(kSize,
227 WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
228 entry3->Close();
229 entry4->Close();
231 FlushQueueForTest();
232 AddDelay();
235 // Creates entries based on random keys. Stores these keys in |key_pool|.
236 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
237 std::set<std::string>* key_pool) {
238 const int kNumEntries = 10;
240 for (int i = 0; i < kNumEntries; ++i) {
241 std::string key = GenerateKey(true);
242 disk_cache::Entry* entry;
243 if (CreateEntry(key, &entry) != net::OK)
244 return false;
245 key_pool->insert(key);
246 entry->Close();
248 return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
251 // Iterates over the backend and checks that the key of each opened entry is
252 // in |keys_to_match|, erasing matched keys. If |max_to_open| is positive, at
253 // most |max_to_open| entries are opened; otherwise, iteration continues until
254 // OpenNextEntry stops returning net::OK.
255 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
256 int max_to_open,
257 void** iter,
258 std::set<std::string>* keys_to_match,
259 size_t* count) {
260 disk_cache::Entry* entry;
262 while (OpenNextEntry(iter, &entry) == net::OK) {
263 if (!entry)
264 return false;
265 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
266 entry->Close();
267 ++(*count);
268 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
269 break;
272 return true;
275 void DiskCacheBackendTest::BackendBasics() {
276 InitCache();
277 disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
278 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
279 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
280 ASSERT_TRUE(NULL != entry1);
281 entry1->Close();
282 entry1 = NULL;
284 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
285 ASSERT_TRUE(NULL != entry1);
286 entry1->Close();
287 entry1 = NULL;
289 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
290 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
291 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
292 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
293 ASSERT_TRUE(NULL != entry1);
294 ASSERT_TRUE(NULL != entry2);
295 EXPECT_EQ(2, cache_->GetEntryCount());
297 disk_cache::Entry* entry3 = NULL;
298 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
299 ASSERT_TRUE(NULL != entry3);
300 EXPECT_TRUE(entry2 == entry3);
301 EXPECT_EQ(2, cache_->GetEntryCount());
303 EXPECT_EQ(net::OK, DoomEntry("some other key"));
304 EXPECT_EQ(1, cache_->GetEntryCount());
305 entry1->Close();
306 entry2->Close();
307 entry3->Close();
309 EXPECT_EQ(net::OK, DoomEntry("the first key"));
310 EXPECT_EQ(0, cache_->GetEntryCount());
312 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
313 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
314 entry1->Doom();
315 entry1->Close();
316 EXPECT_EQ(net::OK, DoomEntry("some other key"));
317 EXPECT_EQ(0, cache_->GetEntryCount());
318 entry2->Close();
321 TEST_F(DiskCacheBackendTest, Basics) {
322 BackendBasics();
325 TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
326 SetNewEviction();
327 BackendBasics();
330 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
331 SetMemoryOnlyMode();
332 BackendBasics();
335 TEST_F(DiskCacheBackendTest, AppCacheBasics) {
336 SetCacheType(net::APP_CACHE);
337 BackendBasics();
340 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
341 SetCacheType(net::SHADER_CACHE);
342 BackendBasics();
345 void DiskCacheBackendTest::BackendKeying() {
346 InitCache();
347 const char* kName1 = "the first key";
348 const char* kName2 = "the first Key";
349 disk_cache::Entry *entry1, *entry2;
350 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
352 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
353 EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
354 entry2->Close();
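// Opening the same key from buffers at different offsets must return the
// already-open entry: the lookup should not depend on the address or
// alignment of the key string.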
356 char buffer[30];
357 base::strlcpy(buffer, kName1, arraysize(buffer));
358 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
359 EXPECT_TRUE(entry1 == entry2);
360 entry2->Close();
362 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
363 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
364 EXPECT_TRUE(entry1 == entry2);
365 entry2->Close();
367 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
368 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
369 EXPECT_TRUE(entry1 == entry2);
370 entry2->Close();
372 // Now verify long keys.
373 char buffer2[20000];
374 memset(buffer2, 's', sizeof(buffer2));
375 buffer2[1023] = '\0';
376 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
377 entry2->Close();
379 buffer2[1023] = 'g';
380 buffer2[19999] = '\0';
381 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
382 entry2->Close();
383 entry1->Close();
386 TEST_F(DiskCacheBackendTest, Keying) {
387 BackendKeying();
390 TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
391 SetNewEviction();
392 BackendKeying();
395 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
396 SetMemoryOnlyMode();
397 BackendKeying();
400 TEST_F(DiskCacheBackendTest, AppCacheKeying) {
401 SetCacheType(net::APP_CACHE);
402 BackendKeying();
405 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
406 SetCacheType(net::SHADER_CACHE);
407 BackendKeying();
410 TEST_F(DiskCacheTest, CreateBackend) {
411 net::TestCompletionCallback cb;
414 ASSERT_TRUE(CleanupCacheDir());
415 base::Thread cache_thread("CacheThread");
416 ASSERT_TRUE(cache_thread.StartWithOptions(
417 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
419 // Test the private factory method(s).
420 scoped_ptr<disk_cache::Backend> cache;
421 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
422 ASSERT_TRUE(cache.get());
423 cache.reset();
425 // Now test the public API.
426 int rv =
427 disk_cache::CreateCacheBackend(net::DISK_CACHE,
428 net::CACHE_BACKEND_DEFAULT,
429 cache_path_,
431 false,
432 cache_thread.message_loop_proxy().get(),
433 NULL,
434 &cache,
435 cb.callback());
436 ASSERT_EQ(net::OK, cb.GetResult(rv));
437 ASSERT_TRUE(cache.get());
438 cache.reset();
440 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
441 net::CACHE_BACKEND_DEFAULT,
442 base::FilePath(), 0,
443 false, NULL, NULL, &cache,
444 cb.callback());
445 ASSERT_EQ(net::OK, cb.GetResult(rv));
446 ASSERT_TRUE(cache.get());
447 cache.reset();
450 base::MessageLoop::current()->RunUntilIdle();
453 // Tests that |BackendImpl| fails to initialize with a missing file.
454 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
455 ASSERT_TRUE(CopyTestCache("bad_entry"));
456 base::FilePath filename = cache_path_.AppendASCII("data_1");
457 base::DeleteFile(filename, false);
458 base::Thread cache_thread("CacheThread");
459 ASSERT_TRUE(cache_thread.StartWithOptions(
460 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
461 net::TestCompletionCallback cb;
463 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
464 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
465 cache_path_, cache_thread.message_loop_proxy().get(), NULL));
466 int rv = cache->Init(cb.callback());
467 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
468 base::ThreadRestrictions::SetIOAllowed(prev);
470 cache.reset();
471 DisableIntegrityCheck();
474 TEST_F(DiskCacheBackendTest, ExternalFiles) {
475 InitCache();
476 // First, let's create a file in the cache folder.
477 base::FilePath filename = cache_path_.AppendASCII("f_000001");
479 const int kSize = 50;
480 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
481 CacheTestFillBuffer(buffer1->data(), kSize, false);
482 ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize));
484 // Now let's create a file with the cache.
485 disk_cache::Entry* entry;
486 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
487 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
488 entry->Close();
490 // And verify that the first file is still there.
491 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
492 ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
493 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
496 // Tests that we deal with file-level pending operations at destruction time.
497 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
498 ASSERT_TRUE(CleanupCacheDir());
499 uint32 flags = disk_cache::kNoBuffering;
500 if (!fast)
501 flags |= disk_cache::kNoRandom;
503 UseCurrentThread();
504 CreateBackend(flags, NULL);
506 net::TestCompletionCallback cb;
507 int rv = GeneratePendingIO(&cb);
509 // The cache destructor will see one pending operation here.
510 cache_.reset();
512 if (rv == net::ERR_IO_PENDING) {
513 if (fast || simple_cache_mode_)
514 EXPECT_FALSE(cb.have_result());
515 else
516 EXPECT_TRUE(cb.have_result());
519 base::MessageLoop::current()->RunUntilIdle();
521 #if !defined(OS_IOS)
522 // Wait for the actual operation to complete, or we'll keep a file handle that
523 // may cause issues later. Note that on iOS systems even though this test
524 // uses a single thread, the actual IO is posted to a worker thread and the
525 // cache destructor breaks the link to reach cb when the operation completes.
526 rv = cb.GetResult(rv);
527 #endif
530 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
531 BackendShutdownWithPendingFileIO(false);
534 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
535 // builds because they contain a lot of intentional memory leaks.
536 // The wrapper scripts used to run tests under Valgrind Memcheck and
537 // Heapchecker will also disable these tests under those tools. See:
538 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
539 // tools/heapcheck/net_unittests.gtest-heapcheck.txt
540 #if !defined(LEAK_SANITIZER)
541 // We'll be leaking from this test.
542 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
543 // The integrity test sets kNoRandom so there's a version mismatch if we don't
544 // force new eviction.
545 SetNewEviction();
546 BackendShutdownWithPendingFileIO(true);
548 #endif
550 // See crbug.com/330074
551 #if !defined(OS_IOS)
552 // Tests that one cache instance is not affected by another one going away.
553 TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
554 base::ScopedTempDir store;
555 ASSERT_TRUE(store.CreateUniqueTempDir());
557 net::TestCompletionCallback cb;
558 scoped_ptr<disk_cache::Backend> extra_cache;
559 int rv = disk_cache::CreateCacheBackend(
560 net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, store.path(), 0,
561 false, base::MessageLoopProxy::current().get(), NULL,
562 &extra_cache, cb.callback());
563 ASSERT_EQ(net::OK, cb.GetResult(rv));
564 ASSERT_TRUE(extra_cache.get() != NULL);
566 ASSERT_TRUE(CleanupCacheDir());
567 SetNewEviction(); // Match the expected behavior for integrity verification.
568 UseCurrentThread();
570 CreateBackend(disk_cache::kNoBuffering, NULL);
571 rv = GeneratePendingIO(&cb);
573 // cache_ has a pending operation, and extra_cache will go away.
574 extra_cache.reset();
576 if (rv == net::ERR_IO_PENDING)
577 EXPECT_FALSE(cb.have_result());
579 base::MessageLoop::current()->RunUntilIdle();
581 // Wait for the actual operation to complete, or we'll keep a file handle that
582 // may cause issues later.
583 rv = cb.GetResult(rv);
585 #endif
587 // Tests that we deal with background-thread pending operations.
588 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
589 net::TestCompletionCallback cb;
592 ASSERT_TRUE(CleanupCacheDir());
593 base::Thread cache_thread("CacheThread");
594 ASSERT_TRUE(cache_thread.StartWithOptions(
595 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
597 uint32 flags = disk_cache::kNoBuffering;
598 if (!fast)
599 flags |= disk_cache::kNoRandom;
601 CreateBackend(flags, &cache_thread);
603 disk_cache::Entry* entry;
604 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
605 ASSERT_EQ(net::OK, cb.GetResult(rv));
607 entry->Close();
609 // The cache destructor will see one pending operation here.
610 cache_.reset();
613 base::MessageLoop::current()->RunUntilIdle();
616 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
617 BackendShutdownWithPendingIO(false);
620 #if !defined(LEAK_SANITIZER)
621 // We'll be leaking from this test.
622 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
623 // The integrity test sets kNoRandom so there's a version mismatch if we don't
624 // force new eviction.
625 SetNewEviction();
626 BackendShutdownWithPendingIO(true);
628 #endif
630 // Tests that we deal with create-type pending operations.
631 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
632 net::TestCompletionCallback cb;
635 ASSERT_TRUE(CleanupCacheDir());
636 base::Thread cache_thread("CacheThread");
637 ASSERT_TRUE(cache_thread.StartWithOptions(
638 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
640 disk_cache::BackendFlags flags =
641 fast ? disk_cache::kNone : disk_cache::kNoRandom;
642 CreateBackend(flags, &cache_thread);
644 disk_cache::Entry* entry;
645 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
646 ASSERT_EQ(net::ERR_IO_PENDING, rv);
648 cache_.reset();
649 EXPECT_FALSE(cb.have_result());
652 base::MessageLoop::current()->RunUntilIdle();
655 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
656 BackendShutdownWithPendingCreate(false);
659 #if !defined(LEAK_SANITIZER)
660 // We'll be leaking an entry from this test.
661 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
662 // The integrity test sets kNoRandom so there's a version mismatch if we don't
663 // force new eviction.
664 SetNewEviction();
665 BackendShutdownWithPendingCreate(true);
667 #endif
669 TEST_F(DiskCacheTest, TruncatedIndex) {
670 ASSERT_TRUE(CleanupCacheDir());
671 base::FilePath index = cache_path_.AppendASCII("index");
672 ASSERT_EQ(5, file_util::WriteFile(index, "hello", 5));
674 base::Thread cache_thread("CacheThread");
675 ASSERT_TRUE(cache_thread.StartWithOptions(
676 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
677 net::TestCompletionCallback cb;
679 scoped_ptr<disk_cache::Backend> backend;
680 int rv =
681 disk_cache::CreateCacheBackend(net::DISK_CACHE,
682 net::CACHE_BACKEND_BLOCKFILE,
683 cache_path_,
685 false,
686 cache_thread.message_loop_proxy().get(),
687 NULL,
688 &backend,
689 cb.callback());
690 ASSERT_NE(net::OK, cb.GetResult(rv));
692 ASSERT_FALSE(backend);
695 void DiskCacheBackendTest::BackendSetSize() {
696 const int cache_size = 0x10000; // 64 kB
697 SetMaxSize(cache_size);
698 InitCache();
700 std::string first("some key");
701 std::string second("something else");
702 disk_cache::Entry* entry;
703 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
705 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
706 memset(buffer->data(), 0, cache_size);
707 EXPECT_EQ(cache_size / 10,
708 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
709 << "normal file";
711 EXPECT_EQ(net::ERR_FAILED,
712 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
713 << "file size above the limit";
715 // By doubling the total size, we make this file cacheable.
716 SetMaxSize(cache_size * 2);
717 EXPECT_EQ(cache_size / 5,
718 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
720 // Let's fill up the cache.
721 SetMaxSize(cache_size * 10);
722 EXPECT_EQ(cache_size * 3 / 4,
723 WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
724 entry->Close();
725 FlushQueueForTest();
727 SetMaxSize(cache_size);
729 // The cache is 95% full.
731 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
732 EXPECT_EQ(cache_size / 10,
733 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
735 disk_cache::Entry* entry2;
736 ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
737 EXPECT_EQ(cache_size / 10,
738 WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
739 entry2->Close(); // This will trigger the cache trim.
741 EXPECT_NE(net::OK, OpenEntry(first, &entry2));
743 FlushQueueForTest(); // Make sure that we are done trimming the cache.
744 FlushQueueForTest(); // We may have posted two tasks to evict stuff.
746 entry->Close();
747 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
748 EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
749 entry->Close();
752 TEST_F(DiskCacheBackendTest, SetSize) {
753 BackendSetSize();
756 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
757 SetNewEviction();
758 BackendSetSize();
761 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
762 SetMemoryOnlyMode();
763 BackendSetSize();
766 void DiskCacheBackendTest::BackendLoad() {
767 InitCache();
768 int seed = static_cast<int>(Time::Now().ToInternalValue());
769 srand(seed);
771 disk_cache::Entry* entries[100];
772 for (int i = 0; i < 100; i++) {
773 std::string key = GenerateKey(true);
774 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
776 EXPECT_EQ(100, cache_->GetEntryCount());
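// Shuffle the open entries so that they are verified and doomed in a random
// order.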
778 for (int i = 0; i < 100; i++) {
779 int source1 = rand() % 100;
780 int source2 = rand() % 100;
781 disk_cache::Entry* temp = entries[source1];
782 entries[source1] = entries[source2];
783 entries[source2] = temp;
786 for (int i = 0; i < 100; i++) {
787 disk_cache::Entry* entry;
788 ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
789 EXPECT_TRUE(entry == entries[i]);
790 entry->Close();
791 entries[i]->Doom();
792 entries[i]->Close();
794 FlushQueueForTest();
795 EXPECT_EQ(0, cache_->GetEntryCount());
798 TEST_F(DiskCacheBackendTest, Load) {
799 // Work with a tiny index table (16 entries)
800 SetMask(0xf);
801 SetMaxSize(0x100000);
802 BackendLoad();
805 TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
806 SetNewEviction();
807 // Work with a tiny index table (16 entries)
808 SetMask(0xf);
809 SetMaxSize(0x100000);
810 BackendLoad();
813 TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
814 SetMaxSize(0x100000);
815 SetMemoryOnlyMode();
816 BackendLoad();
819 TEST_F(DiskCacheBackendTest, AppCacheLoad) {
820 SetCacheType(net::APP_CACHE);
821 // Work with a tiny index table (16 entries)
822 SetMask(0xf);
823 SetMaxSize(0x100000);
824 BackendLoad();
827 TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
828 SetCacheType(net::SHADER_CACHE);
829 // Work with a tiny index table (16 entries)
830 SetMask(0xf);
831 SetMaxSize(0x100000);
832 BackendLoad();
835 // Tests the chaining of an entry to the current head.
836 void DiskCacheBackendTest::BackendChain() {
837 SetMask(0x1); // 2-entry table.
838 SetMaxSize(0x3000); // 12 kB.
839 InitCache();
841 disk_cache::Entry* entry;
842 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
843 entry->Close();
844 ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
845 entry->Close();
848 TEST_F(DiskCacheBackendTest, Chain) {
849 BackendChain();
852 TEST_F(DiskCacheBackendTest, NewEvictionChain) {
853 SetNewEviction();
854 BackendChain();
857 TEST_F(DiskCacheBackendTest, AppCacheChain) {
858 SetCacheType(net::APP_CACHE);
859 BackendChain();
862 TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
863 SetCacheType(net::SHADER_CACHE);
864 BackendChain();
867 TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
868 SetNewEviction();
869 InitCache();
871 disk_cache::Entry* entry;
872 for (int i = 0; i < 100; i++) {
873 std::string name(base::StringPrintf("Key %d", i));
874 ASSERT_EQ(net::OK, CreateEntry(name, &entry));
875 entry->Close();
876 if (i < 90) {
877 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
878 ASSERT_EQ(net::OK, OpenEntry(name, &entry));
879 entry->Close();
883 // The first eviction must come from list 1 (10% limit), the second must come
884 // from list 0.
885 TrimForTest(false);
886 EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
887 TrimForTest(false);
888 EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
890 // Double check that we still have the list tails.
891 ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
892 entry->Close();
893 ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
894 entry->Close();
897 // Before looking for invalid entries, let's check a valid entry.
898 void DiskCacheBackendTest::BackendValidEntry() {
899 InitCache();
901 std::string key("Some key");
902 disk_cache::Entry* entry;
903 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
905 const int kSize = 50;
906 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
907 memset(buffer1->data(), 0, kSize);
908 base::strlcpy(buffer1->data(), "And the data to save", kSize);
909 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
910 entry->Close();
911 SimulateCrash();
913 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
915 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
916 memset(buffer2->data(), 0, kSize);
917 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
918 entry->Close();
919 EXPECT_STREQ(buffer1->data(), buffer2->data());
922 TEST_F(DiskCacheBackendTest, ValidEntry) {
923 BackendValidEntry();
926 TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
927 SetNewEviction();
928 BackendValidEntry();
931 // The same logic as the previous test (ValidEntry), but this time force the
932 // entry to be invalid, simulating a crash in the middle.
933 // We'll be leaking memory from this test.
934 void DiskCacheBackendTest::BackendInvalidEntry() {
935 InitCache();
937 std::string key("Some key");
938 disk_cache::Entry* entry;
939 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
941 const int kSize = 50;
942 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
943 memset(buffer->data(), 0, kSize);
944 base::strlcpy(buffer->data(), "And the data to save", kSize);
945 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
946 SimulateCrash();
948 EXPECT_NE(net::OK, OpenEntry(key, &entry));
949 EXPECT_EQ(0, cache_->GetEntryCount());
952 #if !defined(LEAK_SANITIZER)
953 // We'll be leaking memory from this test.
954 TEST_F(DiskCacheBackendTest, InvalidEntry) {
955 BackendInvalidEntry();
958 // We'll be leaking memory from this test.
959 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
960 SetNewEviction();
961 BackendInvalidEntry();
964 // We'll be leaking memory from this test.
965 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
966 SetCacheType(net::APP_CACHE);
967 BackendInvalidEntry();
970 // We'll be leaking memory from this test.
971 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
972 SetCacheType(net::SHADER_CACHE);
973 BackendInvalidEntry();
976 // Almost the same test, but this time crash the cache after reading an entry.
977 // We'll be leaking memory from this test.
978 void DiskCacheBackendTest::BackendInvalidEntryRead() {
979 InitCache();
981 std::string key("Some key");
982 disk_cache::Entry* entry;
983 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
985 const int kSize = 50;
986 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
987 memset(buffer->data(), 0, kSize);
988 base::strlcpy(buffer->data(), "And the data to save", kSize);
989 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
990 entry->Close();
991 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
992 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
994 SimulateCrash();
996 if (type_ == net::APP_CACHE) {
997 // Reading an entry and crashing should not make it dirty.
998 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
999 EXPECT_EQ(1, cache_->GetEntryCount());
1000 entry->Close();
1001 } else {
1002 EXPECT_NE(net::OK, OpenEntry(key, &entry));
1003 EXPECT_EQ(0, cache_->GetEntryCount());
1007 // We'll be leaking memory from this test.
1008 TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
1009 BackendInvalidEntryRead();
1012 // We'll be leaking memory from this test.
1013 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
1014 SetNewEviction();
1015 BackendInvalidEntryRead();
1018 // We'll be leaking memory from this test.
1019 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
1020 SetCacheType(net::APP_CACHE);
1021 BackendInvalidEntryRead();
1024 // We'll be leaking memory from this test.
1025 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
1026 SetCacheType(net::SHADER_CACHE);
1027 BackendInvalidEntryRead();
1030 // We'll be leaking memory from this test.
1031 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
1032 // Work with a tiny index table (16 entries)
1033 SetMask(0xf);
1034 SetMaxSize(0x100000);
1035 InitCache();
1037 int seed = static_cast<int>(Time::Now().ToInternalValue());
1038 srand(seed);
1040 const int kNumEntries = 100;
1041 disk_cache::Entry* entries[kNumEntries];
1042 for (int i = 0; i < kNumEntries; i++) {
1043 std::string key = GenerateKey(true);
1044 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
1046 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1048 for (int i = 0; i < kNumEntries; i++) {
1049 int source1 = rand() % kNumEntries;
1050 int source2 = rand() % kNumEntries;
1051 disk_cache::Entry* temp = entries[source1];
1052 entries[source1] = entries[source2];
1053 entries[source2] = temp;
1056 std::string keys[kNumEntries];
1057 for (int i = 0; i < kNumEntries; i++) {
1058 keys[i] = entries[i]->GetKey();
1059 if (i < kNumEntries / 2)
1060 entries[i]->Close();
1063 SimulateCrash();
1065 for (int i = kNumEntries / 2; i < kNumEntries; i++) {
1066 disk_cache::Entry* entry;
1067 EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
1070 for (int i = 0; i < kNumEntries / 2; i++) {
1071 disk_cache::Entry* entry;
1072 ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
1073 entry->Close();
1076 EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
1079 // We'll be leaking memory from this test.
1080 TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
1081 BackendInvalidEntryWithLoad();
1084 // We'll be leaking memory from this test.
1085 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
1086 SetNewEviction();
1087 BackendInvalidEntryWithLoad();
1090 // We'll be leaking memory from this test.
1091 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
1092 SetCacheType(net::APP_CACHE);
1093 BackendInvalidEntryWithLoad();
1096 // We'll be leaking memory from this test.
1097 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
1098 SetCacheType(net::SHADER_CACHE);
1099 BackendInvalidEntryWithLoad();
1102 // We'll be leaking memory from this test.
1103 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1104 const int kSize = 0x3000; // 12 kB
1105 SetMaxSize(kSize * 10);
1106 InitCache();
1108 std::string first("some key");
1109 std::string second("something else");
1110 disk_cache::Entry* entry;
1111 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
1113 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1114 memset(buffer->data(), 0, kSize);
1115 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1117 // Simulate a crash.
1118 SimulateCrash();
1120 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
1121 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1123 EXPECT_EQ(2, cache_->GetEntryCount());
1124 SetMaxSize(kSize);
1125 entry->Close(); // Trim the cache.
1126 FlushQueueForTest();
1128 // If we evicted the entry in less than 20 ms, we have one entry in the cache;
1129 // if it took more than that, we posted a task and we'll delete the second
1130 // entry too.
1131 base::MessageLoop::current()->RunUntilIdle();
1133 // This may not be thread-safe in general, but for now it's OK, so add some
1134 // ThreadSanitizer annotations to ignore data races on cache_.
1135 // See http://crbug.com/55970
1136 ANNOTATE_IGNORE_READS_BEGIN();
1137 EXPECT_GE(1, cache_->GetEntryCount());
1138 ANNOTATE_IGNORE_READS_END();
1140 EXPECT_NE(net::OK, OpenEntry(first, &entry));
1143 // We'll be leaking memory from this test.
1144 TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
1145 BackendTrimInvalidEntry();
1148 // We'll be leaking memory from this test.
1149 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
1150 SetNewEviction();
1151 BackendTrimInvalidEntry();
1154 // We'll be leaking memory from this test.
1155 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1156 SetMask(0xf); // 16-entry table.
1158 const int kSize = 0x3000; // 12 kB
1159 SetMaxSize(kSize * 40);
1160 InitCache();
1162 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1163 memset(buffer->data(), 0, kSize);
1164 disk_cache::Entry* entry;
1166 // Writing 32 entries to this cache chains most of them.
1167 for (int i = 0; i < 32; i++) {
1168 std::string key(base::StringPrintf("some key %d", i));
1169 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1170 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1171 entry->Close();
1172 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1173 // Note that we are not closing the entries.
1176 // Simulate a crash.
1177 SimulateCrash();
1179 ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
1180 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1182 FlushQueueForTest();
1183 EXPECT_EQ(33, cache_->GetEntryCount());
1184 SetMaxSize(kSize);
1186 // For the new eviction code, all corrupt entries are on the second list so
1187 // they are not going away that easily.
1188 if (new_eviction_) {
1189 EXPECT_EQ(net::OK, DoomAllEntries());
1192 entry->Close(); // Trim the cache.
1193 FlushQueueForTest();
1195 // We may abort the eviction before cleaning up everything.
1196 base::MessageLoop::current()->RunUntilIdle();
1197 FlushQueueForTest();
1198 // If it's not clear enough: we may still have eviction tasks running at this
1199 // time, so the number of entries is changing while we read it.
1200 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1201 EXPECT_GE(30, cache_->GetEntryCount());
1202 ANNOTATE_IGNORE_READS_AND_WRITES_END();
1205 // We'll be leaking memory from this test.
1206 TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
1207 BackendTrimInvalidEntry2();
1210 // We'll be leaking memory from this test.
1211 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
1212 SetNewEviction();
1213 BackendTrimInvalidEntry2();
1215 #endif // !defined(LEAK_SANITIZER)
1217 void DiskCacheBackendTest::BackendEnumerations() {
1218 InitCache();
1219 Time initial = Time::Now();
1221 const int kNumEntries = 100;
1222 for (int i = 0; i < kNumEntries; i++) {
1223 std::string key = GenerateKey(true);
1224 disk_cache::Entry* entry;
1225 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1226 entry->Close();
1228 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1229 Time final = Time::Now();
1231 disk_cache::Entry* entry;
1232 void* iter = NULL;
1233 int count = 0;
1234 Time last_modified[kNumEntries];
1235 Time last_used[kNumEntries];
1236 while (OpenNextEntry(&iter, &entry) == net::OK) {
1237 ASSERT_TRUE(NULL != entry);
1238 if (count < kNumEntries) {
1239 last_modified[count] = entry->GetLastModified();
1240 last_used[count] = entry->GetLastUsed();
1241 EXPECT_TRUE(initial <= last_modified[count]);
1242 EXPECT_TRUE(final >= last_modified[count]);
1245 entry->Close();
1246 count++;
1248 EXPECT_EQ(kNumEntries, count);
1250 iter = NULL;
1251 count = 0;
1252 // The previous enumeration should not have changed the timestamps.
1253 while (OpenNextEntry(&iter, &entry) == net::OK) {
1254 ASSERT_TRUE(NULL != entry);
1255 if (count < kNumEntries) {
1256 EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1257 EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1259 entry->Close();
1260 count++;
1262 EXPECT_EQ(kNumEntries, count);
1265 TEST_F(DiskCacheBackendTest, Enumerations) {
1266 BackendEnumerations();
1269 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
1270 SetNewEviction();
1271 BackendEnumerations();
1274 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
1275 SetMemoryOnlyMode();
1276 BackendEnumerations();
1279 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
1280 SetCacheType(net::SHADER_CACHE);
1281 BackendEnumerations();
1284 TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
1285 SetCacheType(net::APP_CACHE);
1286 BackendEnumerations();
1289 // Verifies enumerations while entries are open.
1290 void DiskCacheBackendTest::BackendEnumerations2() {
1291 InitCache();
1292 const std::string first("first");
1293 const std::string second("second");
1294 disk_cache::Entry *entry1, *entry2;
1295 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1296 entry1->Close();
1297 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1298 entry2->Close();
1299 FlushQueueForTest();
1301 // Make sure that the timestamp is not the same.
1302 AddDelay();
1303 ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
1304 void* iter = NULL;
1305 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1306 EXPECT_EQ(entry2->GetKey(), second);
1308 // Two entries and the iterator pointing at "first".
1309 entry1->Close();
1310 entry2->Close();
1312 // The iterator should still be valid, so we should not crash.
1313 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1314 EXPECT_EQ(entry2->GetKey(), first);
1315 entry2->Close();
1316 cache_->EndEnumeration(&iter);
1318 // Modify the oldest entry and get the newest element.
1319 ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
1320 EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
1321 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1322 if (type_ == net::APP_CACHE) {
1323 // The list is not updated.
1324 EXPECT_EQ(entry2->GetKey(), second);
1325 } else {
1326 EXPECT_EQ(entry2->GetKey(), first);
1329 entry1->Close();
1330 entry2->Close();
1331 cache_->EndEnumeration(&iter);
1334 TEST_F(DiskCacheBackendTest, Enumerations2) {
1335 BackendEnumerations2();
1338 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
1339 SetNewEviction();
1340 BackendEnumerations2();
1343 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
1344 SetMemoryOnlyMode();
1345 BackendEnumerations2();
1348 TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
1349 SetCacheType(net::APP_CACHE);
1350 BackendEnumerations2();
1353 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
1354 SetCacheType(net::SHADER_CACHE);
1355 BackendEnumerations2();
1358 // Verify that ReadData calls do not update the LRU cache
1359 // when using the SHADER_CACHE type.
1360 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
1361 SetCacheType(net::SHADER_CACHE);
1362 InitCache();
1363 const std::string first("first");
1364 const std::string second("second");
1365 disk_cache::Entry *entry1, *entry2;
1366 const int kSize = 50;
1367 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1369 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1370 memset(buffer1->data(), 0, kSize);
1371 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1372 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1374 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1375 entry2->Close();
1377 FlushQueueForTest();
1379 // Make sure that the timestamp is not the same.
1380 AddDelay();
1382 // Read from the last item in the LRU.
1383 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1384 entry1->Close();
1386 void* iter = NULL;
1387 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1388 EXPECT_EQ(entry2->GetKey(), second);
1389 entry2->Close();
1390 cache_->EndEnumeration(&iter);
1393 #if !defined(LEAK_SANITIZER)
1394 // Verify handling of invalid entries while doing enumerations.
1395 // We'll be leaking memory from this test.
1396 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1397 InitCache();
1399 std::string key("Some key");
1400 disk_cache::Entry *entry, *entry1, *entry2;
1401 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
1403 const int kSize = 50;
1404 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1405 memset(buffer1->data(), 0, kSize);
1406 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1407 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1408 entry1->Close();
1409 ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
1410 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1412 std::string key2("Another key");
1413 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
1414 entry2->Close();
1415 ASSERT_EQ(2, cache_->GetEntryCount());
1417 SimulateCrash();
1419 void* iter = NULL;
1420 int count = 0;
1421 while (OpenNextEntry(&iter, &entry) == net::OK) {
1422 ASSERT_TRUE(NULL != entry);
1423 EXPECT_EQ(key2, entry->GetKey());
1424 entry->Close();
1425 count++;
1427 EXPECT_EQ(1, count);
1428 EXPECT_EQ(1, cache_->GetEntryCount());
1431 // We'll be leaking memory from this test.
1432 TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
1433 BackendInvalidEntryEnumeration();
1436 // We'll be leaking memory from this test.
1437 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
1438 SetNewEviction();
1439 BackendInvalidEntryEnumeration();
1441 #endif // !defined(LEAK_SANITIZER)
1443 // Tests that if for some reason entries are modified close to existing cache
1444 // iterators, we don't generate fatal errors or reset the cache.
1445 void DiskCacheBackendTest::BackendFixEnumerators() {
1446 InitCache();
1448 int seed = static_cast<int>(Time::Now().ToInternalValue());
1449 srand(seed);
1451 const int kNumEntries = 10;
1452 for (int i = 0; i < kNumEntries; i++) {
1453 std::string key = GenerateKey(true);
1454 disk_cache::Entry* entry;
1455 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1456 entry->Close();
1458 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1460 disk_cache::Entry *entry1, *entry2;
1461 void* iter1 = NULL;
1462 void* iter2 = NULL;
1463 ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
1464 ASSERT_TRUE(NULL != entry1);
1465 entry1->Close();
1466 entry1 = NULL;
1468 // Let's go to the middle of the list.
1469 for (int i = 0; i < kNumEntries / 2; i++) {
1470 if (entry1)
1471 entry1->Close();
1472 ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
1473 ASSERT_TRUE(NULL != entry1);
1475 ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1476 ASSERT_TRUE(NULL != entry2);
1477 entry2->Close();
1480 // Messing with entry1 will modify entry2->next.
1481 entry1->Doom();
1482 ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1483 ASSERT_TRUE(NULL != entry2);
1485 // The link entry2->entry1 should be broken.
1486 EXPECT_NE(entry2->GetKey(), entry1->GetKey());
1487 entry1->Close();
1488 entry2->Close();
1490 // And the second iterator should keep working.
1491 ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1492 ASSERT_TRUE(NULL != entry2);
1493 entry2->Close();
1495 cache_->EndEnumeration(&iter1);
1496 cache_->EndEnumeration(&iter2);
1499 TEST_F(DiskCacheBackendTest, FixEnumerators) {
1500 BackendFixEnumerators();
1503 TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
1504 SetNewEviction();
1505 BackendFixEnumerators();
1508 void DiskCacheBackendTest::BackendDoomRecent() {
1509 InitCache();
1511 disk_cache::Entry *entry;
1512 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1513 entry->Close();
1514 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1515 entry->Close();
1516 FlushQueueForTest();
1518 AddDelay();
1519 Time middle = Time::Now();
1521 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1522 entry->Close();
1523 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1524 entry->Close();
1525 FlushQueueForTest();
1527 AddDelay();
1528 Time final = Time::Now();
1530 ASSERT_EQ(4, cache_->GetEntryCount());
1531 EXPECT_EQ(net::OK, DoomEntriesSince(final));
1532 ASSERT_EQ(4, cache_->GetEntryCount());
1534 EXPECT_EQ(net::OK, DoomEntriesSince(middle));
1535 ASSERT_EQ(2, cache_->GetEntryCount());
1537 ASSERT_EQ(net::OK, OpenEntry("second", &entry));
1538 entry->Close();
1541 TEST_F(DiskCacheBackendTest, DoomRecent) {
1542 BackendDoomRecent();
1545 TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
1546 SetNewEviction();
1547 BackendDoomRecent();
1550 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
1551 SetMemoryOnlyMode();
1552 BackendDoomRecent();
1555 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
1556 SetMemoryOnlyMode();
1557 base::Time start;
1558 InitSparseCache(&start, NULL);
1559 DoomEntriesSince(start);
1560 EXPECT_EQ(1, cache_->GetEntryCount());
1563 TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
1564 base::Time start;
1565 InitSparseCache(&start, NULL);
1566 DoomEntriesSince(start);
1567 // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
1568 // MemBackendImpl does not. That's why the expected value differs here from
1569 // MemoryOnlyDoomEntriesSinceSparse.
1570 EXPECT_EQ(3, cache_->GetEntryCount());
1573 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
1574 SetMemoryOnlyMode();
1575 InitSparseCache(NULL, NULL);
1576 EXPECT_EQ(net::OK, DoomAllEntries());
1577 EXPECT_EQ(0, cache_->GetEntryCount());
1580 TEST_F(DiskCacheBackendTest, DoomAllSparse) {
1581 InitSparseCache(NULL, NULL);
1582 EXPECT_EQ(net::OK, DoomAllEntries());
1583 EXPECT_EQ(0, cache_->GetEntryCount());
1586 void DiskCacheBackendTest::BackendDoomBetween() {
1587 InitCache();
1589 disk_cache::Entry *entry;
1590 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1591 entry->Close();
1592 FlushQueueForTest();
1594 AddDelay();
1595 Time middle_start = Time::Now();
1597 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1598 entry->Close();
1599 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1600 entry->Close();
1601 FlushQueueForTest();
1603 AddDelay();
1604 Time middle_end = Time::Now();
1605 AddDelay();
1607 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1608 entry->Close();
1609 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1610 entry->Close();
1611 FlushQueueForTest();
1613 AddDelay();
1614 Time final = Time::Now();
1616 ASSERT_EQ(4, cache_->GetEntryCount());
1617 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
1618 ASSERT_EQ(2, cache_->GetEntryCount());
1620 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1621 entry->Close();
1623 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
1624 ASSERT_EQ(1, cache_->GetEntryCount());
1626 ASSERT_EQ(net::OK, OpenEntry("first", &entry));
1627 entry->Close();
1630 TEST_F(DiskCacheBackendTest, DoomBetween) {
1631 BackendDoomBetween();
1634 TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
1635 SetNewEviction();
1636 BackendDoomBetween();
1639 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
1640 SetMemoryOnlyMode();
1641 BackendDoomBetween();
1644 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
1645 SetMemoryOnlyMode();
1646 base::Time start, end;
1647 InitSparseCache(&start, &end);
1648 DoomEntriesBetween(start, end);
1649 EXPECT_EQ(3, cache_->GetEntryCount());
1651 start = end;
1652 end = base::Time::Now();
1653 DoomEntriesBetween(start, end);
1654 EXPECT_EQ(1, cache_->GetEntryCount());
1657 TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
1658 base::Time start, end;
1659 InitSparseCache(&start, &end);
1660 DoomEntriesBetween(start, end);
1661 EXPECT_EQ(9, cache_->GetEntryCount());
1663 start = end;
1664 end = base::Time::Now();
1665 DoomEntriesBetween(start, end);
1666 EXPECT_EQ(3, cache_->GetEntryCount());
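// Restores a prebuilt cache snapshot |name| that captures an interrupted
// operation, re-initializes it, and verifies that recovery removes the dirty
// entry ("the first key"), leaves |num_entries| usable entries (one fewer
// under heavy load), and keeps the cache structure consistent.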
1669 void DiskCacheBackendTest::BackendTransaction(const std::string& name,
1670 int num_entries, bool load) {
1671 success_ = false;
1672 ASSERT_TRUE(CopyTestCache(name));
1673 DisableFirstCleanup();
1675 uint32 mask;
1676 if (load) {
1677 mask = 0xf;
1678 SetMaxSize(0x100000);
1679 } else {
1680 // Clear the settings from the previous run.
1681 mask = 0;
1682 SetMaxSize(0);
1684 SetMask(mask);
1686 InitCache();
1687 ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());
1689 std::string key("the first key");
1690 disk_cache::Entry* entry1;
1691 ASSERT_NE(net::OK, OpenEntry(key, &entry1));
1693 int actual = cache_->GetEntryCount();
1694 if (num_entries != actual) {
1695 ASSERT_TRUE(load);
1696 // If there is a heavy load, inserting an entry will make another entry
1697 // dirty (on the hash bucket) so two entries are removed.
1698 ASSERT_EQ(num_entries - 1, actual);
1701 cache_.reset();
1702 cache_impl_ = NULL;
1704 ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
1705 success_ = true;
1708 void DiskCacheBackendTest::BackendRecoverInsert() {
1709 // Tests with an empty cache.
1710 BackendTransaction("insert_empty1", 0, false);
1711 ASSERT_TRUE(success_) << "insert_empty1";
1712 BackendTransaction("insert_empty2", 0, false);
1713 ASSERT_TRUE(success_) << "insert_empty2";
1714 BackendTransaction("insert_empty3", 0, false);
1715 ASSERT_TRUE(success_) << "insert_empty3";
1717 // Tests with one entry on the cache.
1718 BackendTransaction("insert_one1", 1, false);
1719 ASSERT_TRUE(success_) << "insert_one1";
1720 BackendTransaction("insert_one2", 1, false);
1721 ASSERT_TRUE(success_) << "insert_one2";
1722 BackendTransaction("insert_one3", 1, false);
1723 ASSERT_TRUE(success_) << "insert_one3";
1725 // Tests with one hundred entries on the cache, tiny index.
1726 BackendTransaction("insert_load1", 100, true);
1727 ASSERT_TRUE(success_) << "insert_load1";
1728 BackendTransaction("insert_load2", 100, true);
1729 ASSERT_TRUE(success_) << "insert_load2";
1732 TEST_F(DiskCacheBackendTest, RecoverInsert) {
1733 BackendRecoverInsert();
1736 TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
1737 SetNewEviction();
1738 BackendRecoverInsert();
1741 void DiskCacheBackendTest::BackendRecoverRemove() {
1742 // Removing the only element.
1743 BackendTransaction("remove_one1", 0, false);
1744 ASSERT_TRUE(success_) << "remove_one1";
1745 BackendTransaction("remove_one2", 0, false);
1746 ASSERT_TRUE(success_) << "remove_one2";
1747 BackendTransaction("remove_one3", 0, false);
1748 ASSERT_TRUE(success_) << "remove_one3";
1750 // Removing the head.
1751 BackendTransaction("remove_head1", 1, false);
1752 ASSERT_TRUE(success_) << "remove_head1";
1753 BackendTransaction("remove_head2", 1, false);
1754 ASSERT_TRUE(success_) << "remove_head2";
1755 BackendTransaction("remove_head3", 1, false);
1756 ASSERT_TRUE(success_) << "remove_head3";
1758 // Removing the tail.
1759 BackendTransaction("remove_tail1", 1, false);
1760 ASSERT_TRUE(success_) << "remove_tail1";
1761 BackendTransaction("remove_tail2", 1, false);
1762 ASSERT_TRUE(success_) << "remove_tail2";
1763 BackendTransaction("remove_tail3", 1, false);
1764 ASSERT_TRUE(success_) << "remove_tail3";
1766 // Removing with one hundred entries on the cache, tiny index.
1767 BackendTransaction("remove_load1", 100, true);
1768 ASSERT_TRUE(success_) << "remove_load1";
1769 BackendTransaction("remove_load2", 100, true);
1770 ASSERT_TRUE(success_) << "remove_load2";
1771 BackendTransaction("remove_load3", 100, true);
1772 ASSERT_TRUE(success_) << "remove_load3";
1774 // This case cannot be reverted.
1775 BackendTransaction("remove_one4", 0, false);
1776 ASSERT_TRUE(success_) << "remove_one4";
1777 BackendTransaction("remove_head4", 1, false);
1778 ASSERT_TRUE(success_) << "remove_head4";
1781 TEST_F(DiskCacheBackendTest, RecoverRemove) {
1782 BackendRecoverRemove();
1785 TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
1786 SetNewEviction();
1787 BackendRecoverRemove();
1790 void DiskCacheBackendTest::BackendRecoverWithEviction() {
1791 success_ = false;
1792 ASSERT_TRUE(CopyTestCache("insert_load1"));
1793 DisableFirstCleanup();
1795 SetMask(0xf);
1796 SetMaxSize(0x1000);
1798 // We should not crash here.
1799 InitCache();
1800 DisableIntegrityCheck();
1803 TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
1804 BackendRecoverWithEviction();
1807 TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
1808 SetNewEviction();
1809 BackendRecoverWithEviction();
1812 // Tests that the |BackendImpl| fails to start with the wrong cache version.
1813 TEST_F(DiskCacheTest, WrongVersion) {
1814 ASSERT_TRUE(CopyTestCache("wrong_version"));
1815 base::Thread cache_thread("CacheThread");
1816 ASSERT_TRUE(cache_thread.StartWithOptions(
1817 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1818 net::TestCompletionCallback cb;
1820 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1821 cache_path_, cache_thread.message_loop_proxy().get(), NULL));
1822 int rv = cache->Init(cb.callback());
1823 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
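// Entropy provider that always returns the same value, making field trial
// group assignment deterministic; the tests below then force the desired
// group explicitly via CreateFieldTrial().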
1826 class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
1827 public:
1828 virtual ~BadEntropyProvider() {}
1830 virtual double GetEntropyForTrial(const std::string& trial_name,
1831 uint32 randomization_seed) const OVERRIDE {
1832 return 0.5;
1836 // Tests that the disk cache successfully joins the control group, dropping the
1837 // existing cache in favour of a new empty cache.
1838 TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
1839 base::Thread cache_thread("CacheThread");
1840 ASSERT_TRUE(cache_thread.StartWithOptions(
1841 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1843 scoped_ptr<disk_cache::BackendImpl> cache =
1844 CreateExistingEntryCache(cache_thread, cache_path_);
1845 ASSERT_TRUE(cache.get());
1846 cache.reset();
1848 // Instantiate the SimpleCacheTrial, forcing this run into the
1849 // ExperimentControl group.
1850 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1851 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1852 "ExperimentControl");
1853 net::TestCompletionCallback cb;
1854 scoped_ptr<disk_cache::Backend> base_cache;
1855 int rv =
1856 disk_cache::CreateCacheBackend(net::DISK_CACHE,
1857 net::CACHE_BACKEND_BLOCKFILE,
1858 cache_path_,
1860 true,
1861 cache_thread.message_loop_proxy().get(),
1862 NULL,
1863 &base_cache,
1864 cb.callback());
1865 ASSERT_EQ(net::OK, cb.GetResult(rv));
1866 EXPECT_EQ(0, base_cache->GetEntryCount());
1869 // Tests that the disk cache can restart in the control group preserving
1870 // existing entries.
1871 TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
1872 // Instantiate the SimpleCacheTrial, forcing this run into the
1873 // ExperimentControl group.
1874 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1875 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1876 "ExperimentControl");
1878 base::Thread cache_thread("CacheThread");
1879 ASSERT_TRUE(cache_thread.StartWithOptions(
1880 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1882 scoped_ptr<disk_cache::BackendImpl> cache =
1883 CreateExistingEntryCache(cache_thread, cache_path_);
1884 ASSERT_TRUE(cache.get());
1886 net::TestCompletionCallback cb;
1888 const int kRestartCount = 5;
1889 for (int i = 0; i < kRestartCount; ++i) {
1890 cache.reset(new disk_cache::BackendImpl(
1891 cache_path_, cache_thread.message_loop_proxy(), NULL));
1892 int rv = cache->Init(cb.callback());
1893 ASSERT_EQ(net::OK, cb.GetResult(rv));
1894 EXPECT_EQ(1, cache->GetEntryCount());
1896 disk_cache::Entry* entry = NULL;
1897 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1898 EXPECT_EQ(net::OK, cb.GetResult(rv));
1899 EXPECT_TRUE(entry);
1900 entry->Close();
1904 // Tests that the disk cache can leave the control group preserving existing
1905 // entries.
1906 TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
1907 base::Thread cache_thread("CacheThread");
1908 ASSERT_TRUE(cache_thread.StartWithOptions(
1909 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1912 // Instantiate the SimpleCacheTrial, forcing this run into the
1913 // ExperimentControl group.
1914 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1915 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1916 "ExperimentControl");
1918 scoped_ptr<disk_cache::BackendImpl> cache =
1919 CreateExistingEntryCache(cache_thread, cache_path_);
1920 ASSERT_TRUE(cache.get());
1923 // Instantiate the SimpleCacheTrial, forcing this run into the
1924 // ExperimentNo group.
1925 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1926 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1927 net::TestCompletionCallback cb;
1929 const int kRestartCount = 5;
1930 for (int i = 0; i < kRestartCount; ++i) {
1931 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1932 cache_path_, cache_thread.message_loop_proxy(), NULL));
1933 int rv = cache->Init(cb.callback());
1934 ASSERT_EQ(net::OK, cb.GetResult(rv));
1935 EXPECT_EQ(1, cache->GetEntryCount());
1937 disk_cache::Entry* entry = NULL;
1938 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1939 EXPECT_EQ(net::OK, cb.GetResult(rv));
1940 EXPECT_TRUE(entry);
1941 entry->Close();
1945 // Tests that the cache is properly restarted on recovery error.
1946 TEST_F(DiskCacheBackendTest, DeleteOld) {
1947 ASSERT_TRUE(CopyTestCache("wrong_version"));
1948 SetNewEviction();
1949 base::Thread cache_thread("CacheThread");
1950 ASSERT_TRUE(cache_thread.StartWithOptions(
1951 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1953 net::TestCompletionCallback cb;
1954 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
1955 base::FilePath path(cache_path_);
1956 int rv =
1957 disk_cache::CreateCacheBackend(net::DISK_CACHE,
1958 net::CACHE_BACKEND_BLOCKFILE,
1959 path,
1961 true,
1962 cache_thread.message_loop_proxy().get(),
1963 NULL,
1964 &cache_,
1965 cb.callback());
1966 path.clear(); // Make sure path was captured by the previous call.
1967 ASSERT_EQ(net::OK, cb.GetResult(rv));
1968 base::ThreadRestrictions::SetIOAllowed(prev);
1969 cache_.reset();
1970 EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
1973 // We want to be able to deal with messed up entries on disk.
1974 void DiskCacheBackendTest::BackendInvalidEntry2() {
1975 ASSERT_TRUE(CopyTestCache("bad_entry"));
1976 DisableFirstCleanup();
1977 InitCache();
1979 disk_cache::Entry *entry1, *entry2;
1980 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
1981 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
1982 entry1->Close();
1984 // CheckCacheIntegrity will fail at this point.
1985 DisableIntegrityCheck();
1988 TEST_F(DiskCacheBackendTest, InvalidEntry2) {
1989 BackendInvalidEntry2();
1992 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
1993 SetNewEviction();
1994 BackendInvalidEntry2();
1997 // Tests that we don't crash or hang when enumerating this cache.
1998 void DiskCacheBackendTest::BackendInvalidEntry3() {
1999 SetMask(0x1); // 2-entry table.
2000 SetMaxSize(0x3000); // 12 kB.
2001 DisableFirstCleanup();
2002 InitCache();
2004 disk_cache::Entry* entry;
2005 void* iter = NULL;
2006 while (OpenNextEntry(&iter, &entry) == net::OK) {
2007 entry->Close();
2011 TEST_F(DiskCacheBackendTest, InvalidEntry3) {
2012 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2013 BackendInvalidEntry3();
2016 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
2017 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2018 SetNewEviction();
2019 BackendInvalidEntry3();
2020 DisableIntegrityCheck();
2023 // Test that we handle a dirty entry on the LRU list, already replaced with
2024 // the same key, and with hash collisions.
2025 TEST_F(DiskCacheBackendTest, InvalidEntry4) {
2026 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2027 SetMask(0x1); // 2-entry table.
2028 SetMaxSize(0x3000); // 12 kB.
2029 DisableFirstCleanup();
2030 InitCache();
2032 TrimForTest(false);
2035 // Test that we handle a dirty entry on the deleted list, already replaced with
2036 // the same key, and with hash collisions.
2037 TEST_F(DiskCacheBackendTest, InvalidEntry5) {
2038 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2039 SetNewEviction();
2040 SetMask(0x1); // 2-entry table.
2041 SetMaxSize(0x3000); // 12 kB.
2042 DisableFirstCleanup();
2043 InitCache();
2045 TrimDeletedListForTest(false);
2048 TEST_F(DiskCacheBackendTest, InvalidEntry6) {
2049 ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2050 SetMask(0x1); // 2-entry table.
2051 SetMaxSize(0x3000); // 12 kB.
2052 DisableFirstCleanup();
2053 InitCache();
2055 // There is a dirty entry (but marked as clean) at the end, pointing to a
2056 // deleted entry through the hash collision list. We should not re-insert the
2057 // deleted entry into the index table.
2059 TrimForTest(false);
2060 // The cache should be clean (as detected by CheckCacheIntegrity).
2063 // Tests that we don't hang when there is a loop on the hash collision list.
2064 // The test cache could be a result of bug 69135.
2065 TEST_F(DiskCacheBackendTest, BadNextEntry1) {
2066 ASSERT_TRUE(CopyTestCache("list_loop2"));
2067 SetMask(0x1); // 2-entry table.
2068 SetMaxSize(0x3000); // 12 kB.
2069 DisableFirstCleanup();
2070 InitCache();
2072 // The second entry points at itself, and the first entry is not accessible
2073 // through the index, but it is at the head of the LRU.
2075 disk_cache::Entry* entry;
2076 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
2077 entry->Close();
2079 TrimForTest(false);
2080 TrimForTest(false);
2081 ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
2082 entry->Close();
2083 EXPECT_EQ(1, cache_->GetEntryCount());
2086 // Tests that we don't hang when there is a loop on the hash collision list.
2087 // The test cache could be a result of bug 69135.
2088 TEST_F(DiskCacheBackendTest, BadNextEntry2) {
2089 ASSERT_TRUE(CopyTestCache("list_loop3"));
2090 SetMask(0x1); // 2-entry table.
2091 SetMaxSize(0x3000); // 12 kB.
2092 DisableFirstCleanup();
2093 InitCache();
2095 // There is a wide loop of 5 entries.
2097 disk_cache::Entry* entry;
2098 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
2101 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2102 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2103 DisableFirstCleanup();
2104 SetNewEviction();
2105 InitCache();
2107 // The second entry is dirty, but removing it should not corrupt the list.
2108 disk_cache::Entry* entry;
2109 ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2110 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2112 // This should not delete the cache.
2113 entry->Doom();
2114 FlushQueueForTest();
2115 entry->Close();
2117 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2118 entry->Close();
2121 // Tests handling of corrupt entries by keeping the rankings node around, with
2122 // a fatal failure.
2123 void DiskCacheBackendTest::BackendInvalidEntry7() {
2124 const int kSize = 0x3000; // 12 kB.
2125 SetMaxSize(kSize * 10);
2126 InitCache();
2128 std::string first("some key");
2129 std::string second("something else");
2130 disk_cache::Entry* entry;
2131 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2132 entry->Close();
2133 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2135 // Corrupt this entry.
2136 disk_cache::EntryImpl* entry_impl =
2137 static_cast<disk_cache::EntryImpl*>(entry);
2139 entry_impl->rankings()->Data()->next = 0;
2140 entry_impl->rankings()->Store();
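// (Zeroing |next| breaks the forward link of this rankings node, so the LRU
// list can no longer be walked past it; as the checks below show, this is
// treated as a fatal corruption and the whole cache ends up being dropped.)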
2141 entry->Close();
2142 FlushQueueForTest();
2143 EXPECT_EQ(2, cache_->GetEntryCount());
2145 // This should detect the bad entry.
2146 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2147 EXPECT_EQ(1, cache_->GetEntryCount());
2149 // We should delete the cache. The list still has a corrupt node.
2150 void* iter = NULL;
2151 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2152 FlushQueueForTest();
2153 EXPECT_EQ(0, cache_->GetEntryCount());
2156 TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2157 BackendInvalidEntry7();
2160 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2161 SetNewEviction();
2162 BackendInvalidEntry7();
2165 // Tests handling of corrupt entries by keeping the rankings node around, with
2166 // a non-fatal failure.
2167 void DiskCacheBackendTest::BackendInvalidEntry8() {
2168 const int kSize = 0x3000; // 12 kB
2169 SetMaxSize(kSize * 10);
2170 InitCache();
2172 std::string first("some key");
2173 std::string second("something else");
2174 disk_cache::Entry* entry;
2175 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2176 entry->Close();
2177 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2179 // Corrupt this entry.
2180 disk_cache::EntryImpl* entry_impl =
2181 static_cast<disk_cache::EntryImpl*>(entry);
2183 entry_impl->rankings()->Data()->contents = 0;
2184 entry_impl->rankings()->Store();
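// (Here only |contents|, the pointer back to the entry's data, is zeroed; the
// rankings list itself stays walkable, so the damage is contained to this one
// entry and the rest of the cache survives, as verified below.)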
2185 entry->Close();
2186 FlushQueueForTest();
2187 EXPECT_EQ(2, cache_->GetEntryCount());
2189 // This should detect the bad entry.
2190 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2191 EXPECT_EQ(1, cache_->GetEntryCount());
2193 // We should not delete the cache.
2194 void* iter = NULL;
2195 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2196 entry->Close();
2197 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2198 EXPECT_EQ(1, cache_->GetEntryCount());
2201 TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2202 BackendInvalidEntry8();
2205 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2206 SetNewEviction();
2207 BackendInvalidEntry8();
2210 // Tests handling of corrupt entries detected by enumerations. Note that these
2211 // tests (xx9 to xx11) are basically just going through slightly different
2212 // codepaths so they are tightly coupled with the code, but that is better than
2213 // not testing error handling code.
2214 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2215 const int kSize = 0x3000; // 12 kB.
2216 SetMaxSize(kSize * 10);
2217 InitCache();
2219 std::string first("some key");
2220 std::string second("something else");
2221 disk_cache::Entry* entry;
2222 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2223 entry->Close();
2224 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2226 // Corrupt this entry.
2227 disk_cache::EntryImpl* entry_impl =
2228 static_cast<disk_cache::EntryImpl*>(entry);
2230 entry_impl->entry()->Data()->state = 0xbad;
2231 entry_impl->entry()->Store();
2232 entry->Close();
2233 FlushQueueForTest();
2234 EXPECT_EQ(2, cache_->GetEntryCount());
2236 if (eviction) {
2237 TrimForTest(false);
2238 EXPECT_EQ(1, cache_->GetEntryCount());
2239 TrimForTest(false);
2240 EXPECT_EQ(1, cache_->GetEntryCount());
2241 } else {
2242 // We should detect the problem through the list, but we should not delete
2243 // the entry, just fail the iteration.
2244 void* iter = NULL;
2245 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2247 // Now a full iteration will work, and return one entry.
2248 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2249 entry->Close();
2250 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2252 // This should detect what's left of the bad entry.
2253 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2254 EXPECT_EQ(2, cache_->GetEntryCount());
2256 DisableIntegrityCheck();
2259 TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2260 BackendInvalidEntry9(false);
2263 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2264 SetNewEviction();
2265 BackendInvalidEntry9(false);
2268 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2269 BackendInvalidEntry9(true);
2272 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2273 SetNewEviction();
2274 BackendInvalidEntry9(true);
2277 // Tests handling of corrupt entries detected by enumerations.
2278 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2279 const int kSize = 0x3000; // 12 kB.
2280 SetMaxSize(kSize * 10);
2281 SetNewEviction();
2282 InitCache();
2284 std::string first("some key");
2285 std::string second("something else");
2286 disk_cache::Entry* entry;
2287 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2288 entry->Close();
2289 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2290 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2291 entry->Close();
2292 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2294 // Corrupt this entry.
2295 disk_cache::EntryImpl* entry_impl =
2296 static_cast<disk_cache::EntryImpl*>(entry);
2298 entry_impl->entry()->Data()->state = 0xbad;
2299 entry_impl->entry()->Store();
2300 entry->Close();
2301 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2302 entry->Close();
2303 EXPECT_EQ(3, cache_->GetEntryCount());
2305 // We have:
2306 // List 0: third -> second (bad).
2307 // List 1: first.
2309 if (eviction) {
2310 // Detection order: second -> first -> third.
2311 TrimForTest(false);
2312 EXPECT_EQ(3, cache_->GetEntryCount());
2313 TrimForTest(false);
2314 EXPECT_EQ(2, cache_->GetEntryCount());
2315 TrimForTest(false);
2316 EXPECT_EQ(1, cache_->GetEntryCount());
2317 } else {
2318 // Detection order: third -> second -> first.
2319 // We should detect the problem through the list, but we should not delete
2320 // the entry.
2321 void* iter = NULL;
2322 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2323 entry->Close();
2324 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2325 EXPECT_EQ(first, entry->GetKey());
2326 entry->Close();
2327 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2329 DisableIntegrityCheck();
2332 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2333 BackendInvalidEntry10(false);
2336 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2337 BackendInvalidEntry10(true);
2340 // Tests handling of corrupt entries detected by enumerations.
2341 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2342 const int kSize = 0x3000; // 12 kB.
2343 SetMaxSize(kSize * 10);
2344 SetNewEviction();
2345 InitCache();
2347 std::string first("some key");
2348 std::string second("something else");
2349 disk_cache::Entry* entry;
2350 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2351 entry->Close();
2352 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2353 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2354 entry->Close();
2355 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2356 entry->Close();
2357 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2358 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2360 // Corrupt this entry.
2361 disk_cache::EntryImpl* entry_impl =
2362 static_cast<disk_cache::EntryImpl*>(entry);
2364 entry_impl->entry()->Data()->state = 0xbad;
2365 entry_impl->entry()->Store();
2366 entry->Close();
2367 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2368 entry->Close();
2369 FlushQueueForTest();
2370 EXPECT_EQ(3, cache_->GetEntryCount());
2372 // We have:
2373 // List 0: third.
2374 // List 1: second (bad) -> first.
2376 if (eviction) {
2377 // Detection order: third -> first -> second.
2378 TrimForTest(false);
2379 EXPECT_EQ(2, cache_->GetEntryCount());
2380 TrimForTest(false);
2381 EXPECT_EQ(1, cache_->GetEntryCount());
2382 TrimForTest(false);
2383 EXPECT_EQ(1, cache_->GetEntryCount());
2384 } else {
2385 // Detection order: third -> second.
2386 // We should detect the problem through the list, but we should not delete
2387 // the entry, just fail the iteration.
2388 void* iter = NULL;
2389 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2390 entry->Close();
2391 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2393 // Now a full iteration will work, and return two entries.
2394 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2395 entry->Close();
2396 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2397 entry->Close();
2398 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2400 DisableIntegrityCheck();
2403 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2404 BackendInvalidEntry11(false);
2407 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2408 BackendInvalidEntry11(true);
2411 // Tests handling of corrupt entries in the middle of a long eviction run.
2412 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2413 const int kSize = 0x3000; // 12 kB
2414 SetMaxSize(kSize * 10);
2415 InitCache();
2417 std::string first("some key");
2418 std::string second("something else");
2419 disk_cache::Entry* entry;
2420 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2421 entry->Close();
2422 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2424 // Corrupt this entry.
2425 disk_cache::EntryImpl* entry_impl =
2426 static_cast<disk_cache::EntryImpl*>(entry);
2428 entry_impl->entry()->Data()->state = 0xbad;
2429 entry_impl->entry()->Store();
2430 entry->Close();
2431 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2432 entry->Close();
2433 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2434 TrimForTest(true);
2435 EXPECT_EQ(1, cache_->GetEntryCount());
2436 entry->Close();
2437 DisableIntegrityCheck();
2440 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2441 BackendTrimInvalidEntry12();
2444 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2445 SetNewEviction();
2446 BackendTrimInvalidEntry12();
2449 // We want to be able to deal with messed up entries on disk.
2450 void DiskCacheBackendTest::BackendInvalidRankings2() {
2451 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2452 DisableFirstCleanup();
2453 InitCache();
2455 disk_cache::Entry *entry1, *entry2;
2456 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2457 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2458 entry2->Close();
2460 // CheckCacheIntegrity will fail at this point.
2461 DisableIntegrityCheck();
2464 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2465 BackendInvalidRankings2();
2468 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2469 SetNewEviction();
2470 BackendInvalidRankings2();
2473 // If the LRU is corrupt, we delete the cache.
2474 void DiskCacheBackendTest::BackendInvalidRankings() {
2475 disk_cache::Entry* entry;
2476 void* iter = NULL;
2477 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2478 entry->Close();
2479 EXPECT_EQ(2, cache_->GetEntryCount());
2481 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2482 FlushQueueForTest(); // Allow the restart to finish.
2483 EXPECT_EQ(0, cache_->GetEntryCount());
2486 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2487 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2488 DisableFirstCleanup();
2489 InitCache();
2490 BackendInvalidRankings();
2493 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2494 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2495 DisableFirstCleanup();
2496 SetNewEviction();
2497 InitCache();
2498 BackendInvalidRankings();
2501 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2502 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2503 DisableFirstCleanup();
2504 InitCache();
2505 SetTestMode(); // Fail cache reinitialization.
2506 BackendInvalidRankings();
2509 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2510 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2511 DisableFirstCleanup();
2512 SetNewEviction();
2513 InitCache();
2514 SetTestMode(); // Fail cache reinitialization.
2515 BackendInvalidRankings();
2518 // If the LRU is corrupt and we have open entries, we disable the cache.
2519 void DiskCacheBackendTest::BackendDisable() {
2520 disk_cache::Entry *entry1, *entry2;
2521 void* iter = NULL;
2522 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2524 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2525 EXPECT_EQ(0, cache_->GetEntryCount());
2526 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2528 entry1->Close();
2529 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2530 FlushQueueForTest(); // This one actually allows that task to complete.
2532 EXPECT_EQ(0, cache_->GetEntryCount());
2535 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2536 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2537 DisableFirstCleanup();
2538 InitCache();
2539 BackendDisable();
2542 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2543 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2544 DisableFirstCleanup();
2545 SetNewEviction();
2546 InitCache();
2547 BackendDisable();
2550 TEST_F(DiskCacheBackendTest, DisableFailure) {
2551 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2552 DisableFirstCleanup();
2553 InitCache();
2554 SetTestMode(); // Fail cache reinitialization.
2555 BackendDisable();
2558 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2559 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2560 DisableFirstCleanup();
2561 SetNewEviction();
2562 InitCache();
2563 SetTestMode(); // Fail cache reinitialization.
2564 BackendDisable();
2567 // This is another type of corruption on the LRU; disable the cache.
2568 void DiskCacheBackendTest::BackendDisable2() {
2569 EXPECT_EQ(8, cache_->GetEntryCount());
2571 disk_cache::Entry* entry;
2572 void* iter = NULL;
2573 int count = 0;
2574 while (OpenNextEntry(&iter, &entry) == net::OK) {
2575 ASSERT_TRUE(NULL != entry);
2576 entry->Close();
2577 count++;
2578 ASSERT_LT(count, 9);
2581 FlushQueueForTest();
2582 EXPECT_EQ(0, cache_->GetEntryCount());
2585 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2586 ASSERT_TRUE(CopyTestCache("list_loop"));
2587 DisableFirstCleanup();
2588 InitCache();
2589 BackendDisable2();
2592 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2593 ASSERT_TRUE(CopyTestCache("list_loop"));
2594 DisableFirstCleanup();
2595 SetNewEviction();
2596 InitCache();
2597 BackendDisable2();
2600 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2601 ASSERT_TRUE(CopyTestCache("list_loop"));
2602 DisableFirstCleanup();
2603 InitCache();
2604 SetTestMode(); // Fail cache reinitialization.
2605 BackendDisable2();
2608 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2609 ASSERT_TRUE(CopyTestCache("list_loop"));
2610 DisableFirstCleanup();
2611 SetNewEviction();
2612 InitCache();
2613 SetTestMode(); // Fail cache reinitialization.
2614 BackendDisable2();
2617 // If the index size changes when we disable the cache, we should not crash.
2618 void DiskCacheBackendTest::BackendDisable3() {
2619 disk_cache::Entry *entry1, *entry2;
2620 void* iter = NULL;
2621 EXPECT_EQ(2, cache_->GetEntryCount());
2622 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2623 entry1->Close();
2625 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2626 FlushQueueForTest();
2628 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2629 entry2->Close();
2631 EXPECT_EQ(1, cache_->GetEntryCount());
2634 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2635 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2636 DisableFirstCleanup();
2637 SetMaxSize(20 * 1024 * 1024);
2638 InitCache();
2639 BackendDisable3();
2642 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2643 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2644 DisableFirstCleanup();
2645 SetMaxSize(20 * 1024 * 1024);
2646 SetNewEviction();
2647 InitCache();
2648 BackendDisable3();
2651 // If we disable the cache, already open entries should work as far as possible.
2652 void DiskCacheBackendTest::BackendDisable4() {
2653 disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2654 void* iter = NULL;
2655 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2657 char key2[2000];
2658 char key3[20000];
2659 CacheTestFillBuffer(key2, sizeof(key2), true);
2660 CacheTestFillBuffer(key3, sizeof(key3), true);
2661 key2[sizeof(key2) - 1] = '\0';
2662 key3[sizeof(key3) - 1] = '\0';
2663 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2664 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2666 const int kBufSize = 20000;
2667 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2668 memset(buf->data(), 0, kBufSize);
2669 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2670 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2672 // This line should disable the cache but not delete it.
2673 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
2674 EXPECT_EQ(0, cache_->GetEntryCount());
2676 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
2678 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2679 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2680 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2682 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2683 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2684 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2686 std::string key = entry2->GetKey();
2687 EXPECT_EQ(sizeof(key2) - 1, key.size());
2688 key = entry3->GetKey();
2689 EXPECT_EQ(sizeof(key3) - 1, key.size());
2691 entry1->Close();
2692 entry2->Close();
2693 entry3->Close();
2694 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2695 FlushQueueForTest(); // This one actually allows that task to complete.
2697 EXPECT_EQ(0, cache_->GetEntryCount());
2700 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2701 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2702 DisableFirstCleanup();
2703 InitCache();
2704 BackendDisable4();
2707 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2708 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2709 DisableFirstCleanup();
2710 SetNewEviction();
2711 InitCache();
2712 BackendDisable4();
2715 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2716 MessageLoopHelper helper;
2718 ASSERT_TRUE(CleanupCacheDir());
2719 scoped_ptr<disk_cache::BackendImpl> cache;
2720 cache.reset(new disk_cache::BackendImpl(
2721 cache_path_, base::MessageLoopProxy::current().get(), NULL));
2722 ASSERT_TRUE(NULL != cache.get());
2723 cache->SetUnitTestMode();
2724 ASSERT_EQ(net::OK, cache->SyncInit());
2726 // Wait for a callback that never comes... about 2 secs :). The message loop
2727 // has to run to allow invocation of the usage timer.
2728 helper.WaitUntilCacheIoFinished(1);
2731 TEST_F(DiskCacheBackendTest, TimerNotCreated) {
2732 ASSERT_TRUE(CopyTestCache("wrong_version"));
2734 scoped_ptr<disk_cache::BackendImpl> cache;
2735 cache.reset(new disk_cache::BackendImpl(
2736 cache_path_, base::MessageLoopProxy::current().get(), NULL));
2737 ASSERT_TRUE(NULL != cache.get());
2738 cache->SetUnitTestMode();
2739 ASSERT_NE(net::OK, cache->SyncInit());
2741 ASSERT_TRUE(NULL == cache->GetTimerForTest());
2743 DisableIntegrityCheck();
2746 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2747 InitCache();
2748 disk_cache::Entry* entry;
2749 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2750 entry->Close();
2751 FlushQueueForTest();
2753 disk_cache::StatsItems stats;
2754 cache_->GetStats(&stats);
2755 EXPECT_FALSE(stats.empty());
2757 disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2758 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
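// For reference (illustrative, not part of the original test):
// disk_cache::StatsItems is a vector of (name, value) string pairs, so the
// whole snapshot can be walked directly, e.g.
//   for (size_t i = 0; i < stats.size(); ++i)
//     printf("%s = %s\n", stats[i].first.c_str(), stats[i].second.c_str());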
2760 cache_.reset();
2762 // Now open the cache and verify that the stats are still there.
2763 DisableFirstCleanup();
2764 InitCache();
2765 EXPECT_EQ(1, cache_->GetEntryCount());
2767 stats.clear();
2768 cache_->GetStats(&stats);
2769 EXPECT_FALSE(stats.empty());
2771 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2774 void DiskCacheBackendTest::BackendDoomAll() {
2775 InitCache();
2777 disk_cache::Entry *entry1, *entry2;
2778 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2779 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2780 entry1->Close();
2781 entry2->Close();
2783 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2784 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2786 ASSERT_EQ(4, cache_->GetEntryCount());
2787 EXPECT_EQ(net::OK, DoomAllEntries());
2788 ASSERT_EQ(0, cache_->GetEntryCount());
2790 // We should stop posting tasks at some point (if we post any).
2791 base::MessageLoop::current()->RunUntilIdle();
2793 disk_cache::Entry *entry3, *entry4;
2794 EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2795 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2796 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2798 EXPECT_EQ(net::OK, DoomAllEntries());
2799 ASSERT_EQ(0, cache_->GetEntryCount());
2801 entry1->Close();
2802 entry2->Close();
2803 entry3->Doom(); // The entry should be already doomed, but this must work.
2804 entry3->Close();
2805 entry4->Close();
2807 // Now try with all references released.
2808 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2809 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2810 entry1->Close();
2811 entry2->Close();
2813 ASSERT_EQ(2, cache_->GetEntryCount());
2814 EXPECT_EQ(net::OK, DoomAllEntries());
2815 ASSERT_EQ(0, cache_->GetEntryCount());
2817 EXPECT_EQ(net::OK, DoomAllEntries());
2820 TEST_F(DiskCacheBackendTest, DoomAll) {
2821 BackendDoomAll();
2824 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2825 SetNewEviction();
2826 BackendDoomAll();
2829 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2830 SetMemoryOnlyMode();
2831 BackendDoomAll();
2834 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2835 SetCacheType(net::APP_CACHE);
2836 BackendDoomAll();
2839 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2840 SetCacheType(net::SHADER_CACHE);
2841 BackendDoomAll();
2844 // If the index size changes when we doom the cache, we should not crash.
2845 void DiskCacheBackendTest::BackendDoomAll2() {
2846 EXPECT_EQ(2, cache_->GetEntryCount());
2847 EXPECT_EQ(net::OK, DoomAllEntries());
2849 disk_cache::Entry* entry;
2850 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2851 entry->Close();
2853 EXPECT_EQ(1, cache_->GetEntryCount());
2856 TEST_F(DiskCacheBackendTest, DoomAll2) {
2857 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2858 DisableFirstCleanup();
2859 SetMaxSize(20 * 1024 * 1024);
2860 InitCache();
2861 BackendDoomAll2();
2864 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2865 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2866 DisableFirstCleanup();
2867 SetMaxSize(20 * 1024 * 1024);
2868 SetNewEviction();
2869 InitCache();
2870 BackendDoomAll2();
2873 // We should be able to create the same entry on multiple simultaneous instances
2874 // of the cache.
2875 TEST_F(DiskCacheTest, MultipleInstances) {
2876 base::ScopedTempDir store1, store2;
2877 ASSERT_TRUE(store1.CreateUniqueTempDir());
2878 ASSERT_TRUE(store2.CreateUniqueTempDir());
2880 base::Thread cache_thread("CacheThread");
2881 ASSERT_TRUE(cache_thread.StartWithOptions(
2882 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2883 net::TestCompletionCallback cb;
2885 const int kNumberOfCaches = 2;
2886 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2888 int rv =
2889 disk_cache::CreateCacheBackend(net::DISK_CACHE,
2890 net::CACHE_BACKEND_DEFAULT,
2891 store1.path(),
2893 false,
2894 cache_thread.message_loop_proxy().get(),
2895 NULL,
2896 &cache[0],
2897 cb.callback());
2898 ASSERT_EQ(net::OK, cb.GetResult(rv));
2899 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2900 net::CACHE_BACKEND_DEFAULT,
2901 store2.path(),
2903 false,
2904 cache_thread.message_loop_proxy().get(),
2905 NULL,
2906 &cache[1],
2907 cb.callback());
2908 ASSERT_EQ(net::OK, cb.GetResult(rv));
2910 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2912 std::string key("the first key");
2913 disk_cache::Entry* entry;
2914 for (int i = 0; i < kNumberOfCaches; i++) {
2915 rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2916 ASSERT_EQ(net::OK, cb.GetResult(rv));
2917 entry->Close();
2921 // Test the six regions of the curve that determines the max cache size.
2922 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2923 using disk_cache::kDefaultCacheSize;
2924 int64 large_size = kDefaultCacheSize;
2926 // Region 1: expected = available * 0.8
2927 EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
2928 disk_cache::PreferredCacheSize(large_size - 1));
2929 EXPECT_EQ(kDefaultCacheSize * 8 / 10,
2930 disk_cache::PreferredCacheSize(large_size));
2931 EXPECT_EQ(kDefaultCacheSize - 1,
2932 disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
2934 // Region 2: expected = default_size
2935 EXPECT_EQ(kDefaultCacheSize,
2936 disk_cache::PreferredCacheSize(large_size * 10 / 8));
2937 EXPECT_EQ(kDefaultCacheSize,
2938 disk_cache::PreferredCacheSize(large_size * 10 - 1));
2940 // Region 3: expected = available * 0.1
2941 EXPECT_EQ(kDefaultCacheSize,
2942 disk_cache::PreferredCacheSize(large_size * 10));
2943 EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
2944 disk_cache::PreferredCacheSize(large_size * 25 - 1));
2946 // Region 4: expected = default_size * 2.5
2947 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2948 disk_cache::PreferredCacheSize(large_size * 25));
2949 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2950 disk_cache::PreferredCacheSize(large_size * 100 - 1));
2951 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2952 disk_cache::PreferredCacheSize(large_size * 100));
2953 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2954 disk_cache::PreferredCacheSize(large_size * 250 - 1));
2956 // Region 5: expected = available * 0.1
2957 int64 largest_size = kDefaultCacheSize * 4;
2958 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2959 disk_cache::PreferredCacheSize(large_size * 250));
2960 EXPECT_EQ(largest_size - 1,
2961 disk_cache::PreferredCacheSize(largest_size * 100 - 1));
2963 // Region 6: expected = largest possible size
2964 EXPECT_EQ(largest_size,
2965 disk_cache::PreferredCacheSize(largest_size * 100));
2966 EXPECT_EQ(largest_size,
2967 disk_cache::PreferredCacheSize(largest_size * 10000));
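// Worked example of the curve above (illustrative only; the exact value of
// kDefaultCacheSize is an implementation detail, assumed here to be roughly
// 80 MB as in historical builds):
//   *  50 MB available -> region 1: preferred size = 50 MB * 0.8 = 40 MB.
//   * 500 MB available -> region 2: preferred size = the default size.
//   *   4 GB available -> region 4: capped at 2.5x the default size.
//   * effectively unlimited disk -> region 6: capped at 4x the default size
//     (the |largest_size| checked above).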
2970 // Tests that we can "migrate" a running instance from one experiment group to
2971 // another.
2972 TEST_F(DiskCacheBackendTest, Histograms) {
2973 InitCache();
2974 disk_cache::BackendImpl* backend_ = cache_impl_; // Needed by the macro.
2976 for (int i = 1; i < 3; i++) {
2977 CACHE_UMA(HOURS, "FillupTime", i, 28);
2981 // Make sure that we keep the total memory used by the internal buffers under
2982 // control.
2983 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
2984 InitCache();
2985 std::string key("the first key");
2986 disk_cache::Entry* entry;
2987 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2989 const int kSize = 200;
2990 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
2991 CacheTestFillBuffer(buffer->data(), kSize, true);
2993 for (int i = 0; i < 10; i++) {
2994 SCOPED_TRACE(i);
2995 // Allocate 2MB for this entry.
2996 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
2997 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
2998 EXPECT_EQ(kSize,
2999 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
3000 EXPECT_EQ(kSize,
3001 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
3003 // Delete one of the buffers and truncate the other.
3004 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
3005 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
3007 // Delete the second buffer, writing 10 bytes to disk.
3008 entry->Close();
3009 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3012 entry->Close();
3013 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
3016 // This test assumes at least 150MB of system memory.
3017 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
3018 InitCache();
3020 const int kOneMB = 1024 * 1024;
3021 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3022 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3024 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3025 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3027 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3028 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
3030 cache_impl_->BufferDeleted(kOneMB);
3031 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3033 // Check the upper limit.
3034 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3036 for (int i = 0; i < 30; i++)
3037 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
3039 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
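// (What the sequence above exercises: IsAllocAllowed() both checks and
// reserves buffer budget, BufferDeleted() gives it back, and the backend
// enforces a global cap on the total, so the single 30 MB request is refused
// outright and repeated 1 MB grants eventually hit the same ceiling.)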
3042 // Tests that sharing of external files works and we are able to delete the
3043 // files when we need to.
3044 TEST_F(DiskCacheBackendTest, FileSharing) {
3045 InitCache();
3047 disk_cache::Addr address(0x80000001);
3048 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3049 base::FilePath name = cache_impl_->GetFileName(address);
3051 scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
3052 file->Init(name);
3054 #if defined(OS_WIN)
3055 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3056 DWORD access = GENERIC_READ | GENERIC_WRITE;
3057 base::win::ScopedHandle file2(CreateFile(
3058 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3059 EXPECT_FALSE(file2.IsValid());
3061 sharing |= FILE_SHARE_DELETE;
3062 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3063 OPEN_EXISTING, 0, NULL));
3064 EXPECT_TRUE(file2.IsValid());
3065 #endif
3067 EXPECT_TRUE(base::DeleteFile(name, false));
3069 // We should be able to use the file.
3070 const int kSize = 200;
3071 char buffer1[kSize];
3072 char buffer2[kSize];
3073 memset(buffer1, 't', kSize);
3074 memset(buffer2, 0, kSize);
3075 EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3076 EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3077 EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
3079 EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
3082 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3083 InitCache();
3085 disk_cache::Entry* entry;
3087 for (int i = 0; i < 2; ++i) {
3088 std::string key = base::StringPrintf("key%d", i);
3089 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3090 entry->Close();
3093 // Ping the oldest entry.
3094 cache_->OnExternalCacheHit("key0");
3096 TrimForTest(false);
3098 // Make sure the older key remains.
3099 EXPECT_EQ(1, cache_->GetEntryCount());
3100 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3101 entry->Close();
3104 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3105 SetCacheType(net::SHADER_CACHE);
3106 InitCache();
3108 disk_cache::Entry* entry;
3110 for (int i = 0; i < 2; ++i) {
3111 std::string key = base::StringPrintf("key%d", i);
3112 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3113 entry->Close();
3116 // Ping the oldest entry.
3117 cache_->OnExternalCacheHit("key0");
3119 TrimForTest(false);
3121 // Make sure the older key remains.
3122 EXPECT_EQ(1, cache_->GetEntryCount());
3123 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3124 entry->Close();
3127 void DiskCacheBackendTest::TracingBackendBasics() {
3128 InitCache();
3129 cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass()));
3130 cache_impl_ = NULL;
3131 EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
3132 if (!simple_cache_mode_) {
3133 EXPECT_EQ(0, cache_->GetEntryCount());
3136 net::TestCompletionCallback cb;
3137 disk_cache::Entry* entry = NULL;
3138 EXPECT_NE(net::OK, OpenEntry("key", &entry));
3139 EXPECT_TRUE(NULL == entry);
3141 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3142 EXPECT_TRUE(NULL != entry);
3144 disk_cache::Entry* same_entry = NULL;
3145 ASSERT_EQ(net::OK, OpenEntry("key", &same_entry));
3146 EXPECT_TRUE(NULL != same_entry);
3148 if (!simple_cache_mode_) {
3149 EXPECT_EQ(1, cache_->GetEntryCount());
3151 entry->Close();
3152 entry = NULL;
3153 same_entry->Close();
3154 same_entry = NULL;
3157 TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
3158 TracingBackendBasics();
3161 // The Simple Cache backend requires a few guarantees from the filesystem like
3162 // atomic renaming of recently open files. Those guarantees are not provided in
3163 // general on Windows.
3164 #if defined(OS_POSIX)
3166 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3167 SetCacheType(net::APP_CACHE);
3168 SetSimpleCacheMode();
3169 BackendShutdownWithPendingCreate(false);
3172 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3173 SetCacheType(net::APP_CACHE);
3174 SetSimpleCacheMode();
3175 BackendShutdownWithPendingFileIO(false);
3178 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3179 SetSimpleCacheMode();
3180 BackendBasics();
3183 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3184 SetCacheType(net::APP_CACHE);
3185 SetSimpleCacheMode();
3186 BackendBasics();
3189 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3190 SetSimpleCacheMode();
3191 BackendKeying();
3194 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3195 SetSimpleCacheMode();
3196 SetCacheType(net::APP_CACHE);
3197 BackendKeying();
3200 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3201 SetSimpleCacheMode();
3202 BackendSetSize();
3205 // MacOS has a default open file limit of 256 files, which is incompatible with
3206 // this simple cache test.
3207 #if defined(OS_MACOSX)
3208 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3209 #else
3210 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3211 #endif
3213 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3214 SetMaxSize(0x100000);
3215 SetSimpleCacheMode();
3216 BackendLoad();
3219 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3220 SetCacheType(net::APP_CACHE);
3221 SetSimpleCacheMode();
3222 SetMaxSize(0x100000);
3223 BackendLoad();
3226 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3227 SetSimpleCacheMode();
3228 BackendDoomRecent();
3231 TEST_F(DiskCacheBackendTest, SimpleDoomBetween) {
3232 SetSimpleCacheMode();
3233 BackendDoomBetween();
3236 TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3237 SetSimpleCacheMode();
3238 BackendDoomAll();
3241 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3242 SetCacheType(net::APP_CACHE);
3243 SetSimpleCacheMode();
3244 BackendDoomAll();
3247 TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
3248 SetSimpleCacheMode();
3249 TracingBackendBasics();
3250 // TODO(pasko): implement integrity checking on the Simple Backend.
3251 DisableIntegrityCheck();
3254 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3255 SetSimpleCacheMode();
3256 InitCache();
3258 const char* key = "the first key";
3259 disk_cache::Entry* entry = NULL;
3261 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3262 ASSERT_TRUE(entry != NULL);
3263 entry->Close();
3264 entry = NULL;
3266 // To make sure the file creation completed, we need to call open again so
3267 // that we block until the files have actually been created.
3268 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3269 ASSERT_TRUE(entry != NULL);
3270 entry->Close();
3271 entry = NULL;
3273 // Delete one of the files in the entry.
3274 base::FilePath to_delete_file = cache_path_.AppendASCII(
3275 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
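// (For context, illustrative only: the Simple Cache stores each entry as a
// small set of flat files named "<hex hash of key>_<stream index>", which is
// what GetFilenameFromKeyAndFileIndex() computes; deleting the "_0" file
// below therefore removes one of the entry's platform files.)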
3276 EXPECT_TRUE(base::PathExists(to_delete_file));
3277 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3279 // Failing to open the entry should delete the rest of these files.
3280 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3282 // Confirm the rest of the files are gone.
3283 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3284 base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3285 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3286 EXPECT_FALSE(base::PathExists(should_be_gone_file));
3290 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3291 SetSimpleCacheMode();
3292 InitCache();
3294 const char* key = "the first key";
3295 disk_cache::Entry* entry = NULL;
3297 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3298 disk_cache::Entry* null = NULL;
3299 ASSERT_NE(null, entry);
3300 entry->Close();
3301 entry = NULL;
3303 // To make sure the file creation completed, we need to call open again so
3304 // that we block until the files have actually been created.
3305 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3306 ASSERT_NE(null, entry);
3307 entry->Close();
3308 entry = NULL;
3310 // Write an invalid header for stream 0 and stream 1.
3311 base::FilePath entry_file1_path = cache_path_.AppendASCII(
3312 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3314 disk_cache::SimpleFileHeader header;
3315 header.initial_magic_number = GG_UINT64_C(0xbadf00d);
3316 EXPECT_EQ(
3317 implicit_cast<int>(sizeof(header)),
3318 file_util::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3319 sizeof(header)));
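// (The open path checks |initial_magic_number| against the Simple Cache magic
// constant, so the bogus 0xbadf00d value written above should make the open
// below fail.)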
3320 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3323 // Tests that the Simple Cache Backend fails to initialize with non-matching
3324 // file structure on disk.
3325 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3326 // Create a cache structure with the |BackendImpl|.
3327 InitCache();
3328 disk_cache::Entry* entry;
3329 const int kSize = 50;
3330 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3331 CacheTestFillBuffer(buffer->data(), kSize, false);
3332 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3333 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3334 entry->Close();
3335 cache_.reset();
3337 // Check that the |SimpleBackendImpl| does not favor this structure.
3338 base::Thread cache_thread("CacheThread");
3339 ASSERT_TRUE(cache_thread.StartWithOptions(
3340 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3341 disk_cache::SimpleBackendImpl* simple_cache =
3342 new disk_cache::SimpleBackendImpl(cache_path_,
3344 net::DISK_CACHE,
3345 cache_thread.message_loop_proxy().get(),
3346 NULL);
3347 net::TestCompletionCallback cb;
3348 int rv = simple_cache->Init(cb.callback());
3349 EXPECT_NE(net::OK, cb.GetResult(rv));
3350 delete simple_cache;
3351 DisableIntegrityCheck();
3354 // Tests that the |BackendImpl| refuses to initialize on top of the files
3355 // generated by the Simple Cache Backend.
3356 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3357 // Create a cache structure with the |SimpleBackendImpl|.
3358 SetSimpleCacheMode();
3359 InitCache();
3360 disk_cache::Entry* entry;
3361 const int kSize = 50;
3362 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3363 CacheTestFillBuffer(buffer->data(), kSize, false);
3364 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3365 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3366 entry->Close();
3367 cache_.reset();
3369 // Check that the |BackendImpl| does not favor this structure.
3370 base::Thread cache_thread("CacheThread");
3371 ASSERT_TRUE(cache_thread.StartWithOptions(
3372 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3373 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3374 cache_path_, base::MessageLoopProxy::current().get(), NULL);
3375 cache->SetUnitTestMode();
3376 net::TestCompletionCallback cb;
3377 int rv = cache->Init(cb.callback());
3378 EXPECT_NE(net::OK, cb.GetResult(rv));
3379 delete cache;
3380 DisableIntegrityCheck();
3383 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3384 SetSimpleCacheMode();
3385 BackendFixEnumerators();
3388 // Tests basic functionality of the SimpleBackend implementation of the
3389 // enumeration API.
3390 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3391 SetSimpleCacheMode();
3392 InitCache();
3393 std::set<std::string> key_pool;
3394 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3396 // Check that enumeration returns all entries.
3397 std::set<std::string> keys_to_match(key_pool);
3398 void* iter = NULL;
3399 size_t count = 0;
3400 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3401 cache_->EndEnumeration(&iter);
3402 EXPECT_EQ(key_pool.size(), count);
3403 EXPECT_TRUE(keys_to_match.empty());
3405 // Check that opening entries does not affect enumeration.
3406 keys_to_match = key_pool;
3407 iter = NULL;
3408 count = 0;
3409 disk_cache::Entry* entry_opened_before;
3410 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3411 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3412 &iter,
3413 &keys_to_match,
3414 &count));
3416 disk_cache::Entry* entry_opened_middle;
3417 ASSERT_EQ(net::OK,
3418 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3419 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3420 cache_->EndEnumeration(&iter);
3421 entry_opened_before->Close();
3422 entry_opened_middle->Close();
3424 EXPECT_EQ(key_pool.size(), count);
3425 EXPECT_TRUE(keys_to_match.empty());
3428 // Tests that the enumerations are not affected by dooming an entry in the
3429 // middle.
3430 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3431 SetSimpleCacheMode();
3432 InitCache();
3433 std::set<std::string> key_pool;
3434 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3436 // Check that enumeration returns all entries but the doomed one.
3437 std::set<std::string> keys_to_match(key_pool);
3438 void* iter = NULL;
3439 size_t count = 0;
3440 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3441 &iter,
3442 &keys_to_match,
3443 &count));
3445 std::string key_to_delete = *(keys_to_match.begin());
3446 DoomEntry(key_to_delete);
3447 keys_to_match.erase(key_to_delete);
3448 key_pool.erase(key_to_delete);
3449 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3450 cache_->EndEnumeration(&iter);
3452 EXPECT_EQ(key_pool.size(), count);
3453 EXPECT_TRUE(keys_to_match.empty());
3456 // Tests that enumerations are not affected by corrupt files.
3457 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3458 SetSimpleCacheMode();
3459 InitCache();
3460 std::set<std::string> key_pool;
3461 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3463 // Create a corrupt entry. The write/read sequence ensures that the entry will
3464 // have been created before corrupting the platform files, in the case of
3465 // optimistic operations.
3466 const std::string key = "the key";
3467 disk_cache::Entry* corrupted_entry;
3469 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3470 ASSERT_TRUE(corrupted_entry);
3471 const int kSize = 50;
3472 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3473 CacheTestFillBuffer(buffer->data(), kSize, false);
3474 ASSERT_EQ(kSize,
3475 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3476 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3477 corrupted_entry->Close();
3479 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3480 key, cache_path_));
3481 EXPECT_EQ(key_pool.size() + 1,
3482 implicit_cast<size_t>(cache_->GetEntryCount()));
3484 // Check that enumeration returns all entries but the corrupt one.
3485 std::set<std::string> keys_to_match(key_pool);
3486 void* iter = NULL;
3487 size_t count = 0;
3488 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3489 cache_->EndEnumeration(&iter);
3491 EXPECT_EQ(key_pool.size(), count);
3492 EXPECT_TRUE(keys_to_match.empty());
3495 #endif // defined(OS_POSIX)