Revert "Reland c91b178b07b0d - Delete dead signin code (SigninGlobalError)"
[chromium-blink-merge.git] / net / disk_cache / backend_unittest.cc
blob3739676e20b605f3002b1922d9af2e9e469413ff
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stdint.h>

#include "base/basictypes.h"
#include "base/files/file_util.h"
#include "base/metrics/field_trial.h"
#include "base/run_loop.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/test/mock_entropy_provider.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
#include "net/base/cache_type.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/mapped_file.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_WIN)
#include "base/win/scoped_handle.h"
#endif

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;

namespace {

const char kExistingEntryKey[] = "existing entry key";
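// Builds a blockfile cache at |cache_path| that already contains one entry
// keyed by kExistingEntryKey and returns the initialized backend, or an empty
// pointer on failure. Used by the SimpleCacheTrial control-group tests below.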
scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
    const base::Thread& cache_thread,
    base::FilePath& cache_path) {
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path, cache_thread.task_runner(), NULL));
  int rv = cache->Init(cb.callback());
  if (cb.GetResult(rv) != net::OK)
    return scoped_ptr<disk_cache::BackendImpl>();

  disk_cache::Entry* entry = NULL;
  rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
  if (cb.GetResult(rv) != net::OK)
    return scoped_ptr<disk_cache::BackendImpl>();
  entry->Close();

  return cache.Pass();
}

}  // namespace
// Tests that can run with different types of caches.
class DiskCacheBackendTest : public DiskCacheTestWithCache {
 protected:
  // Some utility methods:

  // Perform IO operations on the cache until there is pending IO.
  int GeneratePendingIO(net::TestCompletionCallback* cb);

  // Adds 5 sparse entries. If not NULL, |doomed_start| and |doomed_end| are
  // filled with times used by DoomEntriesSince and DoomEntriesBetween.
  // There are 4 entries after doomed_start and 2 after doomed_end.
  void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);

  bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
  bool EnumerateAndMatchKeys(int max_to_open,
                             TestIterator* iter,
                             std::set<std::string>* keys_to_match,
                             size_t* count);

  // Actual tests:
  void BackendBasics();
  void BackendKeying();
  void BackendShutdownWithPendingFileIO(bool fast);
  void BackendShutdownWithPendingIO(bool fast);
  void BackendShutdownWithPendingCreate(bool fast);
  void BackendSetSize();
  void BackendLoad();
  void BackendChain();
  void BackendValidEntry();
  void BackendInvalidEntry();
  void BackendInvalidEntryRead();
  void BackendInvalidEntryWithLoad();
  void BackendTrimInvalidEntry();
  void BackendTrimInvalidEntry2();
  void BackendEnumerations();
  void BackendEnumerations2();
  void BackendInvalidEntryEnumeration();
  void BackendFixEnumerators();
  void BackendDoomRecent();
  void BackendDoomBetween();
  void BackendTransaction(const std::string& name, int num_entries, bool load);
  void BackendRecoverInsert();
  void BackendRecoverRemove();
  void BackendRecoverWithEviction();
  void BackendInvalidEntry2();
  void BackendInvalidEntry3();
  void BackendInvalidEntry7();
  void BackendInvalidEntry8();
  void BackendInvalidEntry9(bool eviction);
  void BackendInvalidEntry10(bool eviction);
  void BackendInvalidEntry11(bool eviction);
  void BackendTrimInvalidEntry12();
  void BackendDoomAll();
  void BackendDoomAll2();
  void BackendInvalidRankings();
  void BackendInvalidRankings2();
  void BackendDisable();
  void BackendDisable2();
  void BackendDisable3();
  void BackendDisable4();
  void BackendDisabledAPI();
};
int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
  if (!use_current_thread_) {
    ADD_FAILURE();
    return net::ERR_FAILED;
  }

  disk_cache::Entry* entry;
  int rv = cache_->CreateEntry("some key", &entry, cb->callback());
  if (cb->GetResult(rv) != net::OK)
    return net::ERR_CACHE_CREATE_FAILURE;

  const int kSize = 25000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
    // We are using the current thread as the cache thread because we want to
    // be able to call this method directly and make sure that it is the OS
    // (instead of us switching threads) that returns IO pending.
    if (!simple_cache_mode_) {
      rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
          0, i, buffer.get(), kSize, cb->callback(), false);
    } else {
      rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
    }

    if (rv == net::ERR_IO_PENDING)
      break;
    if (rv != kSize)
      rv = net::ERR_FAILED;
  }

  // Don't call Close() to avoid going through the queue or we'll deadlock
  // waiting for the operation to finish.
  if (!simple_cache_mode_)
    static_cast<disk_cache::EntryImpl*>(entry)->Release();
  else
    entry->Close();

  return rv;
}
void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
                                           base::Time* doomed_end) {
  InitCache();

  const int kSize = 50;
  // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
  const int kOffset = 10 + 1024 * 1024;

  disk_cache::Entry* entry0 = NULL;
  disk_cache::Entry* entry1 = NULL;
  disk_cache::Entry* entry2 = NULL;

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
  ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
  entry0->Close();

  FlushQueueForTest();
  AddDelay();
  if (doomed_start)
    *doomed_start = base::Time::Now();

  // Order in rankings list:
  // first_part1, first_part2, second_part1, second_part2
  ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
  ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
  entry1->Close();

  ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
  ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
  entry2->Close();

  FlushQueueForTest();
  AddDelay();
  if (doomed_end)
    *doomed_end = base::Time::Now();

  // Order in rankings list:
  // third_part1, fourth_part1, third_part2, fourth_part2
  disk_cache::Entry* entry3 = NULL;
  disk_cache::Entry* entry4 = NULL;
  ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
  ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
  ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
  entry3->Close();
  entry4->Close();

  FlushQueueForTest();
  AddDelay();
}

// Creates entries based on random keys. Stores these keys in |key_pool|.
bool DiskCacheBackendTest::CreateSetOfRandomEntries(
    std::set<std::string>* key_pool) {
  const int kNumEntries = 10;

  for (int i = 0; i < kNumEntries; ++i) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    if (CreateEntry(key, &entry) != net::OK)
      return false;
    key_pool->insert(key);
    entry->Close();
  }
  return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
}

// Iterates over the backend and checks that the key of every opened entry is
// in |keys_to_match|, erasing each matched key. If |max_to_open| is positive,
// at most that many entries are opened; otherwise iteration continues until
// OpenNextEntry stops returning net::OK.
bool DiskCacheBackendTest::EnumerateAndMatchKeys(
    int max_to_open,
    TestIterator* iter,
    std::set<std::string>* keys_to_match,
    size_t* count) {
  disk_cache::Entry* entry;

  if (!iter)
    return false;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    if (!entry)
      return false;
    EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
    entry->Close();
    ++(*count);
    if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
      break;
  }

  return true;
}
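// Exercises the basic entry lifecycle: Create, Open, Doom and Close, checking
// the entry count after each step.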
void DiskCacheBackendTest::BackendBasics() {
  InitCache();
  disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
  ASSERT_TRUE(NULL != entry1);
  ASSERT_TRUE(NULL != entry2);
  EXPECT_EQ(2, cache_->GetEntryCount());

  disk_cache::Entry* entry3 = NULL;
  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
  ASSERT_TRUE(NULL != entry3);
  EXPECT_TRUE(entry2 == entry3);
  EXPECT_EQ(2, cache_->GetEntryCount());

  EXPECT_EQ(net::OK, DoomEntry("some other key"));
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry1->Close();
  entry2->Close();
  entry3->Close();

  EXPECT_EQ(net::OK, DoomEntry("the first key"));
  EXPECT_EQ(0, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
  entry1->Doom();
  entry1->Close();
  EXPECT_EQ(net::OK, DoomEntry("some other key"));
  EXPECT_EQ(0, cache_->GetEntryCount());
  entry2->Close();
}

TEST_F(DiskCacheBackendTest, Basics) {
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
  SetNewEviction();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
  SetMemoryOnlyMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, AppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
  SetCacheType(net::SHADER_CACHE);
  BackendBasics();
}
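// Verifies key handling: keys are case sensitive, entries can be opened from
// differently aligned copies of the key, and long keys that land in block
// files or external files both work.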
void DiskCacheBackendTest::BackendKeying() {
  InitCache();
  const char kName1[] = "the first key";
  const char kName2[] = "the first Key";
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));

  ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
  EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
  entry2->Close();

  char buffer[30];
  base::strlcpy(buffer, kName1, arraysize(buffer));
  ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
  ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
  ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  // Now verify long keys.
  char buffer2[20000];
  memset(buffer2, 's', sizeof(buffer2));
  buffer2[1023] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
  entry2->Close();

  buffer2[1023] = 'g';
  buffer2[19999] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
  entry2->Close();
  entry1->Close();
}

TEST_F(DiskCacheBackendTest, Keying) {
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
  SetNewEviction();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
  SetMemoryOnlyMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, AppCacheKeying) {
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
  SetCacheType(net::SHADER_CACHE);
  BackendKeying();
}
TEST_F(DiskCacheTest, CreateBackend) {
  net::TestCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());
    base::Thread cache_thread("CacheThread");
    ASSERT_TRUE(cache_thread.StartWithOptions(
        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

    // Test the private factory method(s).
    scoped_ptr<disk_cache::Backend> cache;
    cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
    ASSERT_TRUE(cache.get());
    cache.reset();

    // Now test the public API.
    int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                            net::CACHE_BACKEND_DEFAULT,
                                            cache_path_,
                                            0,
                                            false,
                                            cache_thread.task_runner(),
                                            NULL,
                                            &cache,
                                            cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    ASSERT_TRUE(cache.get());
    cache.reset();

    rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
                                        net::CACHE_BACKEND_DEFAULT,
                                        base::FilePath(), 0,
                                        false, NULL, NULL, &cache,
                                        cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    ASSERT_TRUE(cache.get());
    cache.reset();
  }

  base::MessageLoop::current()->RunUntilIdle();
}

// Tests that |BackendImpl| fails to initialize with a missing file.
TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  base::FilePath filename = cache_path_.AppendASCII("data_1");
  base::DeleteFile(filename, false);
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path_, cache_thread.task_runner(), NULL));
  int rv = cache->Init(cb.callback());
  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);

  cache.reset();
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, ExternalFiles) {
  InitCache();
  // First, let's create a file in the cache folder.
  base::FilePath filename = cache_path_.AppendASCII("f_000001");

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize));

  // Now let's create a file with the cache.
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
  entry->Close();

  // And verify that the first file is still there.
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
}
// Tests that we deal with file-level pending operations at destruction time.
void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
  ASSERT_TRUE(CleanupCacheDir());
  uint32 flags = disk_cache::kNoBuffering;
  if (!fast)
    flags |= disk_cache::kNoRandom;

  UseCurrentThread();
  CreateBackend(flags, NULL);

  net::TestCompletionCallback cb;
  int rv = GeneratePendingIO(&cb);

  // The cache destructor will see one pending operation here.
  cache_.reset();

  if (rv == net::ERR_IO_PENDING) {
    if (fast || simple_cache_mode_)
      EXPECT_FALSE(cb.have_result());
    else
      EXPECT_TRUE(cb.have_result());
  }

  base::MessageLoop::current()->RunUntilIdle();

#if !defined(OS_IOS)
  // Wait for the actual operation to complete, or we'll keep a file handle
  // that may cause issues later. Note that even though this test uses a
  // single thread, on iOS the actual IO is posted to a worker thread and the
  // cache destructor breaks the link to reach cb when the operation completes.
  rv = cb.GetResult(rv);
#endif
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
  BackendShutdownWithPendingFileIO(false);
}

// Here and below, tests that simulate crashes are not compiled in
// LeakSanitizer builds because they contain a lot of intentional memory leaks.
// The wrapper scripts used to run tests under Valgrind Memcheck will also
// disable these tests. See:
// tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingFileIO(true);
}
#endif

// See crbug.com/330074
#if !defined(OS_IOS)
// Tests that one cache instance is not affected by another one going away.
TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
  base::ScopedTempDir store;
  ASSERT_TRUE(store.CreateUniqueTempDir());

  net::TestCompletionCallback cb;
  scoped_ptr<disk_cache::Backend> extra_cache;
  int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                          net::CACHE_BACKEND_DEFAULT,
                                          store.path(),
                                          0,
                                          false,
                                          base::ThreadTaskRunnerHandle::Get(),
                                          NULL,
                                          &extra_cache,
                                          cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  ASSERT_TRUE(extra_cache.get() != NULL);

  ASSERT_TRUE(CleanupCacheDir());
  SetNewEviction();  // Match the expected behavior for integrity verification.
  UseCurrentThread();

  CreateBackend(disk_cache::kNoBuffering, NULL);
  rv = GeneratePendingIO(&cb);

  // cache_ has a pending operation, and extra_cache will go away.
  extra_cache.reset();

  if (rv == net::ERR_IO_PENDING)
    EXPECT_FALSE(cb.have_result());

  base::MessageLoop::current()->RunUntilIdle();

  // Wait for the actual operation to complete, or we'll keep a file handle
  // that may cause issues later.
  rv = cb.GetResult(rv);
}
#endif
// Tests that we deal with background-thread pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
  net::TestCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());
    base::Thread cache_thread("CacheThread");
    ASSERT_TRUE(cache_thread.StartWithOptions(
        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

    uint32 flags = disk_cache::kNoBuffering;
    if (!fast)
      flags |= disk_cache::kNoRandom;

    CreateBackend(flags, &cache_thread);

    disk_cache::Entry* entry;
    int rv = cache_->CreateEntry("some key", &entry, cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));

    entry->Close();

    // The cache destructor will see one pending operation here.
    cache_.reset();
  }

  base::MessageLoop::current()->RunUntilIdle();
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
  BackendShutdownWithPendingIO(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingIO(true);
}
#endif

// Tests that we deal with create-type pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
  net::TestCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());
    base::Thread cache_thread("CacheThread");
    ASSERT_TRUE(cache_thread.StartWithOptions(
        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

    disk_cache::BackendFlags flags =
        fast ? disk_cache::kNone : disk_cache::kNoRandom;
    CreateBackend(flags, &cache_thread);

    disk_cache::Entry* entry;
    int rv = cache_->CreateEntry("some key", &entry, cb.callback());
    ASSERT_EQ(net::ERR_IO_PENDING, rv);

    cache_.reset();
    EXPECT_FALSE(cb.have_result());
  }

  base::MessageLoop::current()->RunUntilIdle();
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
  BackendShutdownWithPendingCreate(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking an entry from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingCreate(true);
}
#endif

// Disabled on Android since this test requires the cache creator to create
// blockfile caches.
#if !defined(OS_ANDROID)
TEST_F(DiskCacheTest, TruncatedIndex) {
  ASSERT_TRUE(CleanupCacheDir());
  base::FilePath index = cache_path_.AppendASCII("index");
  ASSERT_EQ(5, base::WriteFile(index, "hello", 5));

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::Backend> backend;
  int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                          net::CACHE_BACKEND_BLOCKFILE,
                                          cache_path_,
                                          0,
                                          false,
                                          cache_thread.task_runner(),
                                          NULL,
                                          &backend,
                                          cb.callback());
  ASSERT_NE(net::OK, cb.GetResult(rv));

  ASSERT_FALSE(backend);
}
#endif
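// Verifies that the backend honors SetMaxSize(): oversized writes fail, and
// filling the cache triggers trimming of the oldest entry.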
void DiskCacheBackendTest::BackendSetSize() {
  const int cache_size = 0x10000;  // 64 kB
  SetMaxSize(cache_size);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
  memset(buffer->data(), 0, cache_size);
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
      << "normal file";

  EXPECT_EQ(net::ERR_FAILED,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
      << "file size above the limit";

  // By doubling the total size, we make this file cacheable.
  SetMaxSize(cache_size * 2);
  EXPECT_EQ(cache_size / 5,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));

  // Let's fill up the cache.
  SetMaxSize(cache_size * 10);
  EXPECT_EQ(cache_size * 3 / 4,
            WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
  entry->Close();
  FlushQueueForTest();

  SetMaxSize(cache_size);

  // The cache is 95% full.

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));

  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
  EXPECT_EQ(cache_size / 10,
            WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
  entry2->Close();  // This will trigger the cache trim.

  EXPECT_NE(net::OK, OpenEntry(first, &entry2));

  FlushQueueForTest();  // Make sure that we are done trimming the cache.
  FlushQueueForTest();  // We may have posted two tasks to evict stuff.

  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, SetSize) {
  BackendSetSize();
}

TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
  SetNewEviction();
  BackendSetSize();
}

TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
  SetMemoryOnlyMode();
  BackendSetSize();
}
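// Creates 100 entries with random keys, shuffles them, then opens and dooms
// every entry to verify that the cache handles a moderate load.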
void DiskCacheBackendTest::BackendLoad() {
  InitCache();
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  disk_cache::Entry* entries[100];
  for (int i = 0; i < 100; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(100, cache_->GetEntryCount());

  for (int i = 0; i < 100; i++) {
    int source1 = rand() % 100;
    int source2 = rand() % 100;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  for (int i = 0; i < 100; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
    EXPECT_TRUE(entry == entries[i]);
    entry->Close();
    entries[i]->Doom();
    entries[i]->Close();
  }
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
  SetNewEviction();
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  SetMaxSize(0x100000);
  SetMemoryOnlyMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, AppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
  SetCacheType(net::SHADER_CACHE);
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

// Tests the chaining of an entry to the current head.
void DiskCacheBackendTest::BackendChain() {
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, Chain) {
  BackendChain();
}

TEST_F(DiskCacheBackendTest, NewEvictionChain) {
  SetNewEviction();
  BackendChain();
}

TEST_F(DiskCacheBackendTest, AppCacheChain) {
  SetCacheType(net::APP_CACHE);
  BackendChain();
}

TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
  SetCacheType(net::SHADER_CACHE);
  BackendChain();
}
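// Exercises trimming with the new eviction scheme: reopened entries land on a
// different list than entries that were only created, and TrimForTest() must
// evict from both lists as the in-body comments describe.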
TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
  SetNewEviction();
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(name, &entry));
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      ASSERT_EQ(net::OK, OpenEntry(name, &entry));
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
  entry->Close();
}

// Before looking for invalid entries, let's check a valid entry.
void DiskCacheBackendTest::BackendValidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  entry->Close();
  SimulateCrash();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_STREQ(buffer1->data(), buffer2->data());
}

TEST_F(DiskCacheBackendTest, ValidEntry) {
  BackendValidEntry();
}

TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
  SetNewEviction();
  BackendValidEntry();
}
// The same logic as the previous test (ValidEntry), but this time force the
// entry to be invalid, simulating a crash in the middle.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  SimulateCrash();

  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache_->GetEntryCount());
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
  SetNewEviction();
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntry();
}

// Almost the same test, but this time crash the cache after reading an entry.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryRead() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  SimulateCrash();

  if (type_ == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(1, cache_->GetEntryCount());
    entry->Close();
  } else {
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  SetNewEviction();
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();
}
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries).
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  SetNewEviction();
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache_->GetEntryCount());
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20 ms, we have one entry in the
  // cache; if it took more than that, we posted a task and we'll delete the
  // second entry too.
  base::MessageLoop::current()->RunUntilIdle();

  // This may not be thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
  BackendTrimInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
  SetNewEviction();
  BackendTrimInvalidEntry();
}
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 40);
  InitCache();

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    // Note that we are not closing the entries.
  }

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  FlushQueueForTest();
  EXPECT_EQ(33, cache_->GetEntryCount());
  SetMaxSize(kSize);

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easily.
  if (new_eviction_) {
    EXPECT_EQ(net::OK, DoomAllEntries());
  }

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::MessageLoop::current()->RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_AND_WRITES_END();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
  BackendTrimInvalidEntry2();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
  SetNewEviction();
  BackendTrimInvalidEntry2();
}
#endif  // !defined(LEAK_SANITIZER)
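// Creates a batch of entries and verifies that enumerating the cache visits
// every entry without altering the last-used or last-modified timestamps.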
void DiskCacheBackendTest::BackendEnumerations() {
  InitCache();
  Time initial = Time::Now();

  const int kNumEntries = 100;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
  Time final = Time::Now();

  disk_cache::Entry* entry;
  scoped_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  Time last_modified[kNumEntries];
  Time last_used[kNumEntries];
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      last_modified[count] = entry->GetLastModified();
      last_used[count] = entry->GetLastUsed();
      EXPECT_TRUE(initial <= last_modified[count]);
      EXPECT_TRUE(final >= last_modified[count]);
    }

    entry->Close();
    count++;
  }
  EXPECT_EQ(kNumEntries, count);

  iter = CreateIterator();
  count = 0;
  // The previous enumeration should not have changed the timestamps.
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
      EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
    }
    entry->Close();
    count++;
  }
  EXPECT_EQ(kNumEntries, count);
}

TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  SetNewEviction();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
}

// Verifies enumerations while entries are open.
void DiskCacheBackendTest::BackendEnumerations2() {
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  entry2->Close();
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();
  ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
  scoped_ptr<TestIterator> iter = CreateIterator();
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(entry2->GetKey(), second);

  // Two entries and the iterator pointing at "first".
  entry1->Close();
  entry2->Close();

  // The iterator should still be valid, so we should not crash.
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(entry2->GetKey(), first);
  entry2->Close();
  iter = CreateIterator();

  // Modify the oldest entry and get the newest element.
  ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
  EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
  if (type_ == net::APP_CACHE) {
    // The list is not updated.
    EXPECT_EQ(entry2->GetKey(), second);
  } else {
    EXPECT_EQ(entry2->GetKey(), first);
  }

  entry1->Close();
  entry2->Close();
}

TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  SetNewEviction();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
  SetMemoryOnlyMode();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
}
// Verify that ReadData calls do not update the LRU cache
// when using the SHADER_CACHE type.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));

  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  entry2->Close();

  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
  entry1->Close();

  scoped_ptr<TestIterator> iter = CreateIterator();
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(entry2->GetKey(), second);
  entry2->Close();
}

#if !defined(LEAK_SANITIZER)
// Verify handling of invalid entries while doing enumerations.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry *entry, *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
  entry1->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));

  std::string key2("Another key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
  entry2->Close();
  ASSERT_EQ(2, cache_->GetEntryCount());

  SimulateCrash();

  scoped_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    EXPECT_EQ(key2, entry->GetKey());
    entry->Close();
    count++;
  }
  EXPECT_EQ(1, count);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  SetNewEviction();
  BackendInvalidEntryEnumeration();
}
#endif  // !defined(LEAK_SANITIZER)
// Tests that if for some reason entries are modified close to existing cache
// iterators, we don't generate fatal errors or reset the cache.
void DiskCacheBackendTest::BackendFixEnumerators() {
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  disk_cache::Entry *entry1, *entry2;
  scoped_ptr<TestIterator> iter1 = CreateIterator(), iter2 = CreateIterator();
  ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    if (entry1)
      entry1->Close();
    ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
    ASSERT_TRUE(NULL != entry1);

    ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
    ASSERT_TRUE(NULL != entry2);
    entry2->Close();
  }

  // Messing up with entry1 will modify entry2->next.
  entry1->Doom();
  ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
  ASSERT_TRUE(NULL != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
  entry1->Close();
  entry2->Close();

  // And the second iterator should keep working.
  ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
  ASSERT_TRUE(NULL != entry2);
  entry2->Close();
}

TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
}

TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  SetNewEviction();
  BackendFixEnumerators();
}
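// Verifies DoomEntriesSince(): entries created after the given time are
// doomed, while older entries survive.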
void DiskCacheBackendTest::BackendDoomRecent() {
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomEntriesSince(final));
  ASSERT_EQ(4, cache_->GetEntryCount());

  EXPECT_EQ(net::OK, DoomEntriesSince(middle));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("second", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  SetNewEviction();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why the expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}
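// Verifies DoomEntriesBetween(): only entries last used between the two given
// times are doomed.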
void DiskCacheBackendTest::BackendDoomBetween() {
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_start = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_end = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  entry->Close();

  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
  ASSERT_EQ(1, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("first", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  SetNewEviction();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
}
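// Loads a prebuilt test cache (|name|) captured in the middle of an operation
// and verifies that the backend recovers: the interrupted entry is discarded,
// |num_entries| entries remain usable, and the on-disk structure stays
// consistent. |load| selects the heavy-load (tiny index) configuration.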
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries, bool load) {
  success_ = false;
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  uint32 mask;
  if (load) {
    mask = 0xf;
    SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    SetMaxSize(0);
  }
  SetMask(mask);

  InitCache();
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  cache_.reset();
  cache_impl_ = NULL;

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
  success_ = true;
}

void DiskCacheBackendTest::BackendRecoverInsert() {
  // Tests with an empty cache.
  BackendTransaction("insert_empty1", 0, false);
  ASSERT_TRUE(success_) << "insert_empty1";
  BackendTransaction("insert_empty2", 0, false);
  ASSERT_TRUE(success_) << "insert_empty2";
  BackendTransaction("insert_empty3", 0, false);
  ASSERT_TRUE(success_) << "insert_empty3";

  // Tests with one entry on the cache.
  BackendTransaction("insert_one1", 1, false);
  ASSERT_TRUE(success_) << "insert_one1";
  BackendTransaction("insert_one2", 1, false);
  ASSERT_TRUE(success_) << "insert_one2";
  BackendTransaction("insert_one3", 1, false);
  ASSERT_TRUE(success_) << "insert_one3";

  // Tests with one hundred entries on the cache, tiny index.
  BackendTransaction("insert_load1", 100, true);
  ASSERT_TRUE(success_) << "insert_load1";
  BackendTransaction("insert_load2", 100, true);
  ASSERT_TRUE(success_) << "insert_load2";
}

TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  SetNewEviction();
  BackendRecoverInsert();
}
void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";
}

#if defined(OS_WIN)
// http://crbug.com/396392
#define MAYBE_RecoverRemove DISABLED_RecoverRemove
#else
#define MAYBE_RecoverRemove RecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
  BackendRecoverRemove();
}

#if defined(OS_WIN)
// http://crbug.com/396392
#define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
#else
#define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
  SetNewEviction();
  BackendRecoverRemove();
}

void DiskCacheBackendTest::BackendRecoverWithEviction() {
  success_ = false;
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  SetMask(0xf);
  SetMaxSize(0x1000);

  // We should not crash here.
  InitCache();
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  SetNewEviction();
  BackendRecoverWithEviction();
}
1832 // Tests that the |BackendImpl| fails to start with the wrong cache version.
1833 TEST_F(DiskCacheTest, WrongVersion) {
1834 ASSERT_TRUE(CopyTestCache("wrong_version"));
1835 base::Thread cache_thread("CacheThread");
1836 ASSERT_TRUE(cache_thread.StartWithOptions(
1837 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1838 net::TestCompletionCallback cb;
1840 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1841 cache_path_, cache_thread.task_runner(), NULL));
1842 int rv = cache->Init(cb.callback());
1843 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
1846 // Tests that the disk cache successfully joins the control group, dropping the
1847 // existing cache in favor of a new empty cache.
1848 // Disabled on Android since this test requires the cache creator to create
1849 // blockfile caches.
1850 #if !defined(OS_ANDROID)
1851 TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
1852 base::Thread cache_thread("CacheThread");
1853 ASSERT_TRUE(cache_thread.StartWithOptions(
1854 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1856 scoped_ptr<disk_cache::BackendImpl> cache =
1857 CreateExistingEntryCache(cache_thread, cache_path_);
1858 ASSERT_TRUE(cache.get());
1859 cache.reset();
1861 // Instantiate the SimpleCacheTrial, forcing this run into the
1862 // ExperimentControl group.
1863 base::FieldTrialList field_trial_list(new base::MockEntropyProvider());
1864 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1865 "ExperimentControl");
1866 net::TestCompletionCallback cb;
1867 scoped_ptr<disk_cache::Backend> base_cache;
1868 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1869 net::CACHE_BACKEND_BLOCKFILE,
1870 cache_path_,
1872 true,
1873 cache_thread.task_runner(),
1874 NULL,
1875 &base_cache,
1876 cb.callback());
1877 ASSERT_EQ(net::OK, cb.GetResult(rv));
1878 EXPECT_EQ(0, base_cache->GetEntryCount());
1880 #endif
1882 // Tests that the disk cache can restart in the control group preserving
1883 // existing entries.
1884 TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
1885 // Instantiate the SimpleCacheTrial, forcing this run into the
1886 // ExperimentControl group.
1887 base::FieldTrialList field_trial_list(new base::MockEntropyProvider());
1888 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1889 "ExperimentControl");
1891 base::Thread cache_thread("CacheThread");
1892 ASSERT_TRUE(cache_thread.StartWithOptions(
1893 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1895 scoped_ptr<disk_cache::BackendImpl> cache =
1896 CreateExistingEntryCache(cache_thread, cache_path_);
1897 ASSERT_TRUE(cache.get());
1899 net::TestCompletionCallback cb;
1901 const int kRestartCount = 5;
1902 for (int i = 0; i < kRestartCount; ++i) {
1903 cache.reset(new disk_cache::BackendImpl(cache_path_,
1904 cache_thread.task_runner(), NULL));
1905 int rv = cache->Init(cb.callback());
1906 ASSERT_EQ(net::OK, cb.GetResult(rv));
1907 EXPECT_EQ(1, cache->GetEntryCount());
1909 disk_cache::Entry* entry = NULL;
1910 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1911 EXPECT_EQ(net::OK, cb.GetResult(rv));
1912 EXPECT_TRUE(entry);
1913 entry->Close();
1917 // Tests that the disk cache can leave the control group preserving existing
1918 // entries.
1919 TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
1920 base::Thread cache_thread("CacheThread");
1921 ASSERT_TRUE(cache_thread.StartWithOptions(
1922 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1925 // Instantiate the SimpleCacheTrial, forcing this run into the
1926 // ExperimentControl group.
1927 base::FieldTrialList field_trial_list(new base::MockEntropyProvider());
1928 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1929 "ExperimentControl");
1931 scoped_ptr<disk_cache::BackendImpl> cache =
1932 CreateExistingEntryCache(cache_thread, cache_path_);
1933 ASSERT_TRUE(cache.get());
1936 // Instantiate the SimpleCacheTrial, forcing this run into the
1937 // ExperimentNo group.
1938 base::FieldTrialList field_trial_list(new base::MockEntropyProvider());
1939 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1940 net::TestCompletionCallback cb;
1942 const int kRestartCount = 5;
1943 for (int i = 0; i < kRestartCount; ++i) {
1944 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1945 cache_path_, cache_thread.task_runner(), NULL));
1946 int rv = cache->Init(cb.callback());
1947 ASSERT_EQ(net::OK, cb.GetResult(rv));
1948 EXPECT_EQ(1, cache->GetEntryCount());
1950 disk_cache::Entry* entry = NULL;
1951 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1952 EXPECT_EQ(net::OK, cb.GetResult(rv));
1953 EXPECT_TRUE(entry);
1954 entry->Close();
1958 // Tests that the cache is properly restarted on recovery error.
1959 // Disabled on Android since this test requires the cache creator to create
1960 // blockfile caches.
1961 #if !defined(OS_ANDROID)
1962 TEST_F(DiskCacheBackendTest, DeleteOld) {
1963 ASSERT_TRUE(CopyTestCache("wrong_version"));
1964 SetNewEviction();
1965 base::Thread cache_thread("CacheThread");
1966 ASSERT_TRUE(cache_thread.StartWithOptions(
1967 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1969 net::TestCompletionCallback cb;
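// Disallow IO on this thread to verify that the old (wrong version) cache is
// dropped from the cache thread rather than from here.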
1970 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
1971 base::FilePath path(cache_path_);
1972 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1973 net::CACHE_BACKEND_BLOCKFILE,
1974 path,
1976 true,
1977 cache_thread.task_runner(),
1978 NULL,
1979 &cache_,
1980 cb.callback());
1981 path.clear(); // Make sure path was captured by the previous call.
1982 ASSERT_EQ(net::OK, cb.GetResult(rv));
1983 base::ThreadRestrictions::SetIOAllowed(prev);
1984 cache_.reset();
1985 EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
1987 #endif
1989 // We want to be able to deal with messed up entries on disk.
1990 void DiskCacheBackendTest::BackendInvalidEntry2() {
1991 ASSERT_TRUE(CopyTestCache("bad_entry"));
1992 DisableFirstCleanup();
1993 InitCache();
1995 disk_cache::Entry *entry1, *entry2;
1996 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
1997 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
1998 entry1->Close();
2000 // CheckCacheIntegrity will fail at this point.
2001 DisableIntegrityCheck();
2004 TEST_F(DiskCacheBackendTest, InvalidEntry2) {
2005 BackendInvalidEntry2();
2008 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
2009 SetNewEviction();
2010 BackendInvalidEntry2();
2013 // Tests that we don't crash or hang when enumerating this cache.
2014 void DiskCacheBackendTest::BackendInvalidEntry3() {
2015 SetMask(0x1); // 2-entry table.
2016 SetMaxSize(0x3000); // 12 kB.
2017 DisableFirstCleanup();
2018 InitCache();
2020 disk_cache::Entry* entry;
2021 scoped_ptr<TestIterator> iter = CreateIterator();
2022 while (iter->OpenNextEntry(&entry) == net::OK) {
2023 entry->Close();
2027 TEST_F(DiskCacheBackendTest, InvalidEntry3) {
2028 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2029 BackendInvalidEntry3();
2032 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
2033 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2034 SetNewEviction();
2035 BackendInvalidEntry3();
2036 DisableIntegrityCheck();
2039 // Test that we handle a dirty entry on the LRU list, already replaced with
2040 // the same key, and with hash collisions.
2041 TEST_F(DiskCacheBackendTest, InvalidEntry4) {
2042 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2043 SetMask(0x1); // 2-entry table.
2044 SetMaxSize(0x3000); // 12 kB.
2045 DisableFirstCleanup();
2046 InitCache();
2048 TrimForTest(false);
2051 // Test that we handle a dirty entry on the deleted list, already replaced with
2052 // the same key, and with hash collisions.
2053 TEST_F(DiskCacheBackendTest, InvalidEntry5) {
2054 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2055 SetNewEviction();
2056 SetMask(0x1); // 2-entry table.
2057 SetMaxSize(0x3000); // 12 kB.
2058 DisableFirstCleanup();
2059 InitCache();
2061 TrimDeletedListForTest(false);
2064 TEST_F(DiskCacheBackendTest, InvalidEntry6) {
2065 ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2066 SetMask(0x1); // 2-entry table.
2067 SetMaxSize(0x3000); // 12 kB.
2068 DisableFirstCleanup();
2069 InitCache();
2071 // There is a dirty entry (but marked as clean) at the end, pointing to a
2072 // deleted entry through the hash collision list. We should not re-insert the
2073 // deleted entry into the index table.
2075 TrimForTest(false);
2076 // The cache should be clean (as detected by CheckCacheIntegrity).
2079 // Tests that we don't hang when there is a loop on the hash collision list.
2080 // The test cache could be a result of bug 69135.
2081 TEST_F(DiskCacheBackendTest, BadNextEntry1) {
2082 ASSERT_TRUE(CopyTestCache("list_loop2"));
2083 SetMask(0x1); // 2-entry table.
2084 SetMaxSize(0x3000); // 12 kB.
2085 DisableFirstCleanup();
2086 InitCache();
2088 // The second entry points at itself, and the first entry is not accessible
2089 // through the index, but it is at the head of the LRU.
2091 disk_cache::Entry* entry;
2092 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
2093 entry->Close();
2095 TrimForTest(false);
2096 TrimForTest(false);
2097 ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
2098 entry->Close();
2099 EXPECT_EQ(1, cache_->GetEntryCount());
2102 // Tests that we don't hang when there is a loop on the hash collision list.
2103 // The test cache could be a result of bug 69135.
2104 TEST_F(DiskCacheBackendTest, BadNextEntry2) {
2105 ASSERT_TRUE(CopyTestCache("list_loop3"));
2106 SetMask(0x1); // 2-entry table.
2107 SetMaxSize(0x3000); // 12 kB.
2108 DisableFirstCleanup();
2109 InitCache();
2111 // There is a wide loop of 5 entries.
2113 disk_cache::Entry* entry;
2114 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
2117 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2118 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2119 DisableFirstCleanup();
2120 SetNewEviction();
2121 InitCache();
2123 // The second entry is dirty, but removing it should not corrupt the list.
2124 disk_cache::Entry* entry;
2125 ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2126 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2128 // This should not delete the cache.
2129 entry->Doom();
2130 FlushQueueForTest();
2131 entry->Close();
2133 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2134 entry->Close();
2137 // Tests handling of corrupt entries by keeping the rankings node around, with
2138 // a fatal failure.
2139 void DiskCacheBackendTest::BackendInvalidEntry7() {
2140 const int kSize = 0x3000; // 12 kB.
2141 SetMaxSize(kSize * 10);
2142 InitCache();
2144 std::string first("some key");
2145 std::string second("something else");
2146 disk_cache::Entry* entry;
2147 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2148 entry->Close();
2149 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2151 // Corrupt this entry.
2152 disk_cache::EntryImpl* entry_impl =
2153 static_cast<disk_cache::EntryImpl*>(entry);
2155 entry_impl->rankings()->Data()->next = 0;
2156 entry_impl->rankings()->Store();
2157 entry->Close();
2158 FlushQueueForTest();
2159 EXPECT_EQ(2, cache_->GetEntryCount());
2161 // This should detect the bad entry.
2162 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2163 EXPECT_EQ(1, cache_->GetEntryCount());
2165 // We should delete the cache. The list still has a corrupt node.
2166 scoped_ptr<TestIterator> iter = CreateIterator();
2167 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2168 FlushQueueForTest();
2169 EXPECT_EQ(0, cache_->GetEntryCount());
2172 TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2173 BackendInvalidEntry7();
2176 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2177 SetNewEviction();
2178 BackendInvalidEntry7();
2181 // Tests handling of corrupt entries by keeping the rankings node around, with
2182 // a non-fatal failure.
2183 void DiskCacheBackendTest::BackendInvalidEntry8() {
2184 const int kSize = 0x3000; // 12 kB
2185 SetMaxSize(kSize * 10);
2186 InitCache();
2188 std::string first("some key");
2189 std::string second("something else");
2190 disk_cache::Entry* entry;
2191 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2192 entry->Close();
2193 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2195 // Corrupt this entry.
2196 disk_cache::EntryImpl* entry_impl =
2197 static_cast<disk_cache::EntryImpl*>(entry);
2199 entry_impl->rankings()->Data()->contents = 0;
2200 entry_impl->rankings()->Store();
2201 entry->Close();
2202 FlushQueueForTest();
2203 EXPECT_EQ(2, cache_->GetEntryCount());
2205 // This should detect the bad entry.
2206 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2207 EXPECT_EQ(1, cache_->GetEntryCount());
2209 // We should not delete the cache.
2210 scoped_ptr<TestIterator> iter = CreateIterator();
2211 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2212 entry->Close();
2213 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2214 EXPECT_EQ(1, cache_->GetEntryCount());
2217 TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2218 BackendInvalidEntry8();
2221 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2222 SetNewEviction();
2223 BackendInvalidEntry8();
2226 // Tests handling of corrupt entries detected by enumerations. Note that these
2227 // tests (xx9 to xx11) are basically just going through slightly different
2228 // code paths, so they are tightly coupled with the code, but that is better than
2229 // not testing error handling code.
2230 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2231 const int kSize = 0x3000; // 12 kB.
2232 SetMaxSize(kSize * 10);
2233 InitCache();
2235 std::string first("some key");
2236 std::string second("something else");
2237 disk_cache::Entry* entry;
2238 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2239 entry->Close();
2240 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2242 // Corrupt this entry.
2243 disk_cache::EntryImpl* entry_impl =
2244 static_cast<disk_cache::EntryImpl*>(entry);
2246 entry_impl->entry()->Data()->state = 0xbad;
2247 entry_impl->entry()->Store();
2248 entry->Close();
2249 FlushQueueForTest();
2250 EXPECT_EQ(2, cache_->GetEntryCount());
2252 if (eviction) {
2253 TrimForTest(false);
2254 EXPECT_EQ(1, cache_->GetEntryCount());
2255 TrimForTest(false);
2256 EXPECT_EQ(1, cache_->GetEntryCount());
2257 } else {
2258 // We should detect the problem through the list, but we should not delete
2259 // the entry, just fail the iteration.
2260 scoped_ptr<TestIterator> iter = CreateIterator();
2261 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2263 // Now a full iteration will work, and return one entry.
2264 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2265 entry->Close();
2266 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2268 // This should detect what's left of the bad entry.
2269 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2270 EXPECT_EQ(2, cache_->GetEntryCount());
2272 DisableIntegrityCheck();
2275 TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2276 BackendInvalidEntry9(false);
2279 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2280 SetNewEviction();
2281 BackendInvalidEntry9(false);
2284 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2285 BackendInvalidEntry9(true);
2288 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2289 SetNewEviction();
2290 BackendInvalidEntry9(true);
2293 // Tests handling of corrupt entries detected by enumerations.
2294 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2295 const int kSize = 0x3000; // 12 kB.
2296 SetMaxSize(kSize * 10);
2297 SetNewEviction();
2298 InitCache();
2300 std::string first("some key");
2301 std::string second("something else");
2302 disk_cache::Entry* entry;
2303 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2304 entry->Close();
2305 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2306 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2307 entry->Close();
2308 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2310 // Corrupt this entry.
2311 disk_cache::EntryImpl* entry_impl =
2312 static_cast<disk_cache::EntryImpl*>(entry);
2314 entry_impl->entry()->Data()->state = 0xbad;
2315 entry_impl->entry()->Store();
2316 entry->Close();
2317 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2318 entry->Close();
2319 EXPECT_EQ(3, cache_->GetEntryCount());
2321 // We have:
2322 // List 0: third -> second (bad).
2323 // List 1: first.
2325 if (eviction) {
2326 // Detection order: second -> first -> third.
2327 TrimForTest(false);
2328 EXPECT_EQ(3, cache_->GetEntryCount());
2329 TrimForTest(false);
2330 EXPECT_EQ(2, cache_->GetEntryCount());
2331 TrimForTest(false);
2332 EXPECT_EQ(1, cache_->GetEntryCount());
2333 } else {
2334 // Detection order: third -> second -> first.
2335 // We should detect the problem through the list, but we should not delete
2336 // the entry.
2337 scoped_ptr<TestIterator> iter = CreateIterator();
2338 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2339 entry->Close();
2340 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2341 EXPECT_EQ(first, entry->GetKey());
2342 entry->Close();
2343 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2345 DisableIntegrityCheck();
2348 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2349 BackendInvalidEntry10(false);
2352 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2353 BackendInvalidEntry10(true);
2356 // Tests handling of corrupt entries detected by enumerations.
2357 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2358 const int kSize = 0x3000; // 12 kB.
2359 SetMaxSize(kSize * 10);
2360 SetNewEviction();
2361 InitCache();
2363 std::string first("some key");
2364 std::string second("something else");
2365 disk_cache::Entry* entry;
2366 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2367 entry->Close();
2368 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2369 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2370 entry->Close();
2371 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2372 entry->Close();
2373 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2374 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2376 // Corrupt this entry.
2377 disk_cache::EntryImpl* entry_impl =
2378 static_cast<disk_cache::EntryImpl*>(entry);
2380 entry_impl->entry()->Data()->state = 0xbad;
2381 entry_impl->entry()->Store();
2382 entry->Close();
2383 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2384 entry->Close();
2385 FlushQueueForTest();
2386 EXPECT_EQ(3, cache_->GetEntryCount());
2388 // We have:
2389 // List 0: third.
2390 // List 1: second (bad) -> first.
2392 if (eviction) {
2393 // Detection order: third -> first -> second.
2394 TrimForTest(false);
2395 EXPECT_EQ(2, cache_->GetEntryCount());
2396 TrimForTest(false);
2397 EXPECT_EQ(1, cache_->GetEntryCount());
2398 TrimForTest(false);
2399 EXPECT_EQ(1, cache_->GetEntryCount());
2400 } else {
2401 // Detection order: third -> second.
2402 // We should detect the problem through the list, but we should not delete
2403 // the entry, just fail the iteration.
2404 scoped_ptr<TestIterator> iter = CreateIterator();
2405 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2406 entry->Close();
2407 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2409 // Now a full iteration will work, and return two entries.
2410 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2411 entry->Close();
2412 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2413 entry->Close();
2414 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2416 DisableIntegrityCheck();
2419 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2420 BackendInvalidEntry11(false);
2423 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2424 BackendInvalidEntry11(true);
2427 // Tests handling of corrupt entries in the middle of a long eviction run.
2428 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2429 const int kSize = 0x3000; // 12 kB
2430 SetMaxSize(kSize * 10);
2431 InitCache();
2433 std::string first("some key");
2434 std::string second("something else");
2435 disk_cache::Entry* entry;
2436 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2437 entry->Close();
2438 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2440 // Corrupt this entry.
2441 disk_cache::EntryImpl* entry_impl =
2442 static_cast<disk_cache::EntryImpl*>(entry);
2444 entry_impl->entry()->Data()->state = 0xbad;
2445 entry_impl->entry()->Store();
2446 entry->Close();
2447 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2448 entry->Close();
2449 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2450 TrimForTest(true);
2451 EXPECT_EQ(1, cache_->GetEntryCount());
2452 entry->Close();
2453 DisableIntegrityCheck();
2456 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2457 BackendTrimInvalidEntry12();
2460 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2461 SetNewEviction();
2462 BackendTrimInvalidEntry12();
2465 // We want to be able to deal with messed up entries on disk.
2466 void DiskCacheBackendTest::BackendInvalidRankings2() {
2467 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2468 DisableFirstCleanup();
2469 InitCache();
2471 disk_cache::Entry *entry1, *entry2;
2472 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2473 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2474 entry2->Close();
2476 // CheckCacheIntegrity will fail at this point.
2477 DisableIntegrityCheck();
2480 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2481 BackendInvalidRankings2();
2484 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2485 SetNewEviction();
2486 BackendInvalidRankings2();
2489 // If the LRU is corrupt, we delete the cache.
2490 void DiskCacheBackendTest::BackendInvalidRankings() {
2491 disk_cache::Entry* entry;
2492 scoped_ptr<TestIterator> iter = CreateIterator();
2493 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2494 entry->Close();
2495 EXPECT_EQ(2, cache_->GetEntryCount());
2497 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2498 FlushQueueForTest(); // Allow the restart to finish.
2499 EXPECT_EQ(0, cache_->GetEntryCount());
2502 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2503 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2504 DisableFirstCleanup();
2505 InitCache();
2506 BackendInvalidRankings();
2509 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2510 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2511 DisableFirstCleanup();
2512 SetNewEviction();
2513 InitCache();
2514 BackendInvalidRankings();
2517 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2518 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2519 DisableFirstCleanup();
2520 InitCache();
2521 SetTestMode(); // Fail cache reinitialization.
2522 BackendInvalidRankings();
2525 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2526 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2527 DisableFirstCleanup();
2528 SetNewEviction();
2529 InitCache();
2530 SetTestMode(); // Fail cache reinitialization.
2531 BackendInvalidRankings();
2534 // If the LRU is corrupt and we have open entries, we disable the cache.
2535 void DiskCacheBackendTest::BackendDisable() {
2536 disk_cache::Entry *entry1, *entry2;
2537 scoped_ptr<TestIterator> iter = CreateIterator();
2538 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2540 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2541 EXPECT_EQ(0, cache_->GetEntryCount());
2542 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2544 entry1->Close();
2545 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2546 FlushQueueForTest(); // This one actually allows that task to complete.
2548 EXPECT_EQ(0, cache_->GetEntryCount());
2551 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2552 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2553 DisableFirstCleanup();
2554 InitCache();
2555 BackendDisable();
2558 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2559 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2560 DisableFirstCleanup();
2561 SetNewEviction();
2562 InitCache();
2563 BackendDisable();
2566 TEST_F(DiskCacheBackendTest, DisableFailure) {
2567 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2568 DisableFirstCleanup();
2569 InitCache();
2570 SetTestMode(); // Fail cache reinitialization.
2571 BackendDisable();
2574 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2575 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2576 DisableFirstCleanup();
2577 SetNewEviction();
2578 InitCache();
2579 SetTestMode(); // Fail cache reinitialization.
2580 BackendDisable();
2583 // This is another type of corruption on the LRU; disable the cache.
2584 void DiskCacheBackendTest::BackendDisable2() {
2585 EXPECT_EQ(8, cache_->GetEntryCount());
2587 disk_cache::Entry* entry;
2588 scoped_ptr<TestIterator> iter = CreateIterator();
2589 int count = 0;
2590 while (iter->OpenNextEntry(&entry) == net::OK) {
2591 ASSERT_TRUE(NULL != entry);
2592 entry->Close();
2593 count++;
2594 ASSERT_LT(count, 9);
2597 FlushQueueForTest();
2598 EXPECT_EQ(0, cache_->GetEntryCount());
2601 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2602 ASSERT_TRUE(CopyTestCache("list_loop"));
2603 DisableFirstCleanup();
2604 InitCache();
2605 BackendDisable2();
2608 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2609 ASSERT_TRUE(CopyTestCache("list_loop"));
2610 DisableFirstCleanup();
2611 SetNewEviction();
2612 InitCache();
2613 BackendDisable2();
2616 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2617 ASSERT_TRUE(CopyTestCache("list_loop"));
2618 DisableFirstCleanup();
2619 InitCache();
2620 SetTestMode(); // Fail cache reinitialization.
2621 BackendDisable2();
2624 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2625 ASSERT_TRUE(CopyTestCache("list_loop"));
2626 DisableFirstCleanup();
2627 SetNewEviction();
2628 InitCache();
2629 SetTestMode(); // Fail cache reinitialization.
2630 BackendDisable2();
2633 // If the index size changes when we disable the cache, we should not crash.
2634 void DiskCacheBackendTest::BackendDisable3() {
2635 disk_cache::Entry *entry1, *entry2;
2636 scoped_ptr<TestIterator> iter = CreateIterator();
2637 EXPECT_EQ(2, cache_->GetEntryCount());
2638 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2639 entry1->Close();
2641 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2642 FlushQueueForTest();
2644 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2645 entry2->Close();
2647 EXPECT_EQ(1, cache_->GetEntryCount());
2650 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2651 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2652 DisableFirstCleanup();
2653 SetMaxSize(20 * 1024 * 1024);
2654 InitCache();
2655 BackendDisable3();
2658 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2659 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2660 DisableFirstCleanup();
2661 SetMaxSize(20 * 1024 * 1024);
2662 SetNewEviction();
2663 InitCache();
2664 BackendDisable3();
2667 // If we disable the cache, already open entries should work as far as possible.
2668 void DiskCacheBackendTest::BackendDisable4() {
2669 disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2670 scoped_ptr<TestIterator> iter = CreateIterator();
2671 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
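// Create two entries with keys too long to fit in the entry record, so the
// GetKey() calls below have to read them back while the cache is disabled.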
2673 char key2[2000];
2674 char key3[20000];
2675 CacheTestFillBuffer(key2, sizeof(key2), true);
2676 CacheTestFillBuffer(key3, sizeof(key3), true);
2677 key2[sizeof(key2) - 1] = '\0';
2678 key3[sizeof(key3) - 1] = '\0';
2679 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2680 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2682 const int kBufSize = 20000;
2683 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2684 memset(buf->data(), 0, kBufSize);
2685 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2686 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2688 // This line should disable the cache but not delete it.
2689 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4));
2690 EXPECT_EQ(0, cache_->GetEntryCount());
2692 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
2694 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2695 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2696 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2698 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2699 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2700 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2702 std::string key = entry2->GetKey();
2703 EXPECT_EQ(sizeof(key2) - 1, key.size());
2704 key = entry3->GetKey();
2705 EXPECT_EQ(sizeof(key3) - 1, key.size());
2707 entry1->Close();
2708 entry2->Close();
2709 entry3->Close();
2710 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2711 FlushQueueForTest(); // This one actually allows that task to complete.
2713 EXPECT_EQ(0, cache_->GetEntryCount());
2716 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2717 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2718 DisableFirstCleanup();
2719 InitCache();
2720 BackendDisable4();
2723 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2724 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2725 DisableFirstCleanup();
2726 SetNewEviction();
2727 InitCache();
2728 BackendDisable4();
2731 // Tests the exposed API with a disabled cache.
2732 void DiskCacheBackendTest::BackendDisabledAPI() {
2733 cache_impl_->SetUnitTestMode(); // Simulate failure restarting the cache.
2735 disk_cache::Entry* entry1, *entry2;
2736 scoped_ptr<TestIterator> iter = CreateIterator();
2737 EXPECT_EQ(2, cache_->GetEntryCount());
2738 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2739 entry1->Close();
2740 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2741 FlushQueueForTest();
2742 // The cache should be disabled.
2744 EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
2745 EXPECT_EQ(0, cache_->GetEntryCount());
2746 EXPECT_NE(net::OK, OpenEntry("First", &entry2));
2747 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2748 EXPECT_NE(net::OK, DoomEntry("First"));
2749 EXPECT_NE(net::OK, DoomAllEntries());
2750 EXPECT_NE(net::OK, DoomEntriesBetween(Time(), Time::Now()));
2751 EXPECT_NE(net::OK, DoomEntriesSince(Time()));
2752 iter = CreateIterator();
2753 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2755 base::StringPairs stats;
2756 cache_->GetStats(&stats);
2757 EXPECT_TRUE(stats.empty());
2758 cache_->OnExternalCacheHit("First");
2761 TEST_F(DiskCacheBackendTest, DisabledAPI) {
2762 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2763 DisableFirstCleanup();
2764 InitCache();
2765 BackendDisabledAPI();
2768 TEST_F(DiskCacheBackendTest, NewEvictionDisabledAPI) {
2769 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2770 DisableFirstCleanup();
2771 SetNewEviction();
2772 InitCache();
2773 BackendDisabledAPI();
2776 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2777 MessageLoopHelper helper;
2779 ASSERT_TRUE(CleanupCacheDir());
2780 scoped_ptr<disk_cache::BackendImpl> cache;
2781 cache.reset(new disk_cache::BackendImpl(
2782 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2783 ASSERT_TRUE(NULL != cache.get());
2784 cache->SetUnitTestMode();
2785 ASSERT_EQ(net::OK, cache->SyncInit());
2787 // Wait for a callback that never comes... about 2 secs :). The message loop
2788 // has to run to allow invocation of the usage timer.
2789 helper.WaitUntilCacheIoFinished(1);
2792 TEST_F(DiskCacheBackendTest, TimerNotCreated) {
2793 ASSERT_TRUE(CopyTestCache("wrong_version"));
2795 scoped_ptr<disk_cache::BackendImpl> cache;
2796 cache.reset(new disk_cache::BackendImpl(
2797 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2798 ASSERT_TRUE(NULL != cache.get());
2799 cache->SetUnitTestMode();
2800 ASSERT_NE(net::OK, cache->SyncInit());
2802 ASSERT_TRUE(NULL == cache->GetTimerForTest());
2804 DisableIntegrityCheck();
2807 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2808 InitCache();
2809 disk_cache::Entry* entry;
2810 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2811 entry->Close();
2812 FlushQueueForTest();
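// The single successful Create above should show up in the stats as exactly
// one "Create hit".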
2814 disk_cache::StatsItems stats;
2815 cache_->GetStats(&stats);
2816 EXPECT_FALSE(stats.empty());
2818 disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2819 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2821 cache_.reset();
2823 // Now open the cache and verify that the stats are still there.
2824 DisableFirstCleanup();
2825 InitCache();
2826 EXPECT_EQ(1, cache_->GetEntryCount());
2828 stats.clear();
2829 cache_->GetStats(&stats);
2830 EXPECT_FALSE(stats.empty());
2832 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2835 void DiskCacheBackendTest::BackendDoomAll() {
2836 InitCache();
2838 disk_cache::Entry *entry1, *entry2;
2839 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2840 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2841 entry1->Close();
2842 entry2->Close();
2844 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2845 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2847 ASSERT_EQ(4, cache_->GetEntryCount());
2848 EXPECT_EQ(net::OK, DoomAllEntries());
2849 ASSERT_EQ(0, cache_->GetEntryCount());
2851 // We should stop posting tasks at some point (if we post any).
2852 base::MessageLoop::current()->RunUntilIdle();
2854 disk_cache::Entry *entry3, *entry4;
2855 EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2856 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2857 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2859 EXPECT_EQ(net::OK, DoomAllEntries());
2860 ASSERT_EQ(0, cache_->GetEntryCount());
2862 entry1->Close();
2863 entry2->Close();
2864 entry3->Doom(); // The entry should be already doomed, but this must work.
2865 entry3->Close();
2866 entry4->Close();
2868 // Now try with all references released.
2869 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2870 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2871 entry1->Close();
2872 entry2->Close();
2874 ASSERT_EQ(2, cache_->GetEntryCount());
2875 EXPECT_EQ(net::OK, DoomAllEntries());
2876 ASSERT_EQ(0, cache_->GetEntryCount());
2878 EXPECT_EQ(net::OK, DoomAllEntries());
2881 TEST_F(DiskCacheBackendTest, DoomAll) {
2882 BackendDoomAll();
2885 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2886 SetNewEviction();
2887 BackendDoomAll();
2890 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2891 SetMemoryOnlyMode();
2892 BackendDoomAll();
2895 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2896 SetCacheType(net::APP_CACHE);
2897 BackendDoomAll();
2900 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2901 SetCacheType(net::SHADER_CACHE);
2902 BackendDoomAll();
2905 // If the index size changes when we doom the cache, we should not crash.
2906 void DiskCacheBackendTest::BackendDoomAll2() {
2907 EXPECT_EQ(2, cache_->GetEntryCount());
2908 EXPECT_EQ(net::OK, DoomAllEntries());
2910 disk_cache::Entry* entry;
2911 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2912 entry->Close();
2914 EXPECT_EQ(1, cache_->GetEntryCount());
2917 TEST_F(DiskCacheBackendTest, DoomAll2) {
2918 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2919 DisableFirstCleanup();
2920 SetMaxSize(20 * 1024 * 1024);
2921 InitCache();
2922 BackendDoomAll2();
2925 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2926 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2927 DisableFirstCleanup();
2928 SetMaxSize(20 * 1024 * 1024);
2929 SetNewEviction();
2930 InitCache();
2931 BackendDoomAll2();
2934 // We should be able to create the same entry on multiple simultaneous instances
2935 // of the cache.
2936 TEST_F(DiskCacheTest, MultipleInstances) {
2937 base::ScopedTempDir store1, store2;
2938 ASSERT_TRUE(store1.CreateUniqueTempDir());
2939 ASSERT_TRUE(store2.CreateUniqueTempDir());
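// Each backend gets its own directory so the two instances do not share files.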
2941 base::Thread cache_thread("CacheThread");
2942 ASSERT_TRUE(cache_thread.StartWithOptions(
2943 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2944 net::TestCompletionCallback cb;
2946 const int kNumberOfCaches = 2;
2947 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2949 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
2950 net::CACHE_BACKEND_DEFAULT,
2951 store1.path(),
2953 false,
2954 cache_thread.task_runner(),
2955 NULL,
2956 &cache[0],
2957 cb.callback());
2958 ASSERT_EQ(net::OK, cb.GetResult(rv));
2959 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2960 net::CACHE_BACKEND_DEFAULT,
2961 store2.path(),
2963 false,
2964 cache_thread.task_runner(),
2965 NULL,
2966 &cache[1],
2967 cb.callback());
2968 ASSERT_EQ(net::OK, cb.GetResult(rv));
2970 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2972 std::string key("the first key");
2973 disk_cache::Entry* entry;
2974 for (int i = 0; i < kNumberOfCaches; i++) {
2975 rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2976 ASSERT_EQ(net::OK, cb.GetResult(rv));
2977 entry->Close();
2981 // Test the six regions of the curve that determines the max cache size.
2982 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2983 using disk_cache::kDefaultCacheSize;
2984 int64 large_size = kDefaultCacheSize;
2986 // Region 1: expected = available * 0.8
2987 EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
2988 disk_cache::PreferredCacheSize(large_size - 1));
2989 EXPECT_EQ(kDefaultCacheSize * 8 / 10,
2990 disk_cache::PreferredCacheSize(large_size));
2991 EXPECT_EQ(kDefaultCacheSize - 1,
2992 disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
2994 // Region 2: expected = default_size
2995 EXPECT_EQ(kDefaultCacheSize,
2996 disk_cache::PreferredCacheSize(large_size * 10 / 8));
2997 EXPECT_EQ(kDefaultCacheSize,
2998 disk_cache::PreferredCacheSize(large_size * 10 - 1));
3000 // Region 3: expected = available * 0.1
3001 EXPECT_EQ(kDefaultCacheSize,
3002 disk_cache::PreferredCacheSize(large_size * 10));
3003 EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
3004 disk_cache::PreferredCacheSize(large_size * 25 - 1));
3006 // Region 4: expected = default_size * 2.5
3007 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3008 disk_cache::PreferredCacheSize(large_size * 25));
3009 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3010 disk_cache::PreferredCacheSize(large_size * 100 - 1));
3011 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3012 disk_cache::PreferredCacheSize(large_size * 100));
3013 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3014 disk_cache::PreferredCacheSize(large_size * 250 - 1));
3016 // Region 5: expected = available * 0.1
3017 int64 largest_size = kDefaultCacheSize * 4;
3018 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3019 disk_cache::PreferredCacheSize(large_size * 250));
3020 EXPECT_EQ(largest_size - 1,
3021 disk_cache::PreferredCacheSize(largest_size * 100 - 1));
3023 // Region 6: expected = largest possible size
3024 EXPECT_EQ(largest_size,
3025 disk_cache::PreferredCacheSize(largest_size * 100));
3026 EXPECT_EQ(largest_size,
3027 disk_cache::PreferredCacheSize(largest_size * 10000));
3030 // Tests that we can "migrate" a running instance from one experiment group to
3031 // another.
3032 TEST_F(DiskCacheBackendTest, Histograms) {
3033 InitCache();
3034 disk_cache::BackendImpl* backend_ = cache_impl_; // Needed by the macro.
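// Report the same stat under experiment ids 1 and 2, as if this running
// instance had moved from one experiment group to the other.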
3036 for (int i = 1; i < 3; i++) {
3037 CACHE_UMA(HOURS, "FillupTime", i, 28);
3041 // Make sure that we keep the total memory used by the internal buffers under
3042 // control.
3043 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
3044 InitCache();
3045 std::string key("the first key");
3046 disk_cache::Entry* entry;
3047 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3049 const int kSize = 200;
3050 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3051 CacheTestFillBuffer(buffer->data(), kSize, true);
3053 for (int i = 0; i < 10; i++) {
3054 SCOPED_TRACE(i);
3055 // Allocate 2MB for this entry.
3056 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
3057 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
3058 EXPECT_EQ(kSize,
3059 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
3060 EXPECT_EQ(kSize,
3061 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
3063 // Delete one of the buffers and truncate the other.
3064 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
3065 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
3067 // Delete the second buffer, writing 10 bytes to disk.
3068 entry->Close();
3069 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3072 entry->Close();
3073 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
3076 // This test assumes at least 150MB of system memory.
3077 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
3078 InitCache();
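// Every allocation that IsAllocAllowed() approves is added to the total
// reported by GetTotalBuffersSize().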
3080 const int kOneMB = 1024 * 1024;
3081 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3082 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3084 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3085 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3087 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3088 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
3090 cache_impl_->BufferDeleted(kOneMB);
3091 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3093 // Check the upper limit.
3094 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3096 for (int i = 0; i < 30; i++)
3097 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
3099 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
3102 // Tests that sharing of external files works and we are able to delete the
3103 // files when we need to.
3104 TEST_F(DiskCacheBackendTest, FileSharing) {
3105 InitCache();
3107 disk_cache::Addr address(0x80000001);
3108 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3109 base::FilePath name = cache_impl_->GetFileName(address);
3111 scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
3112 file->Init(name);
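// On Windows, |file| keeps the cache file open, so a second open succeeds only
// when it asks for delete sharing.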
3114 #if defined(OS_WIN)
3115 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3116 DWORD access = GENERIC_READ | GENERIC_WRITE;
3117 base::win::ScopedHandle file2(CreateFile(
3118 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3119 EXPECT_FALSE(file2.IsValid());
3121 sharing |= FILE_SHARE_DELETE;
3122 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3123 OPEN_EXISTING, 0, NULL));
3124 EXPECT_TRUE(file2.IsValid());
3125 #endif
3127 EXPECT_TRUE(base::DeleteFile(name, false));
3129 // We should be able to use the file.
3130 const int kSize = 200;
3131 char buffer1[kSize];
3132 char buffer2[kSize];
3133 memset(buffer1, 't', kSize);
3134 memset(buffer2, 0, kSize);
3135 EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3136 EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3137 EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
3139 EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
3142 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3143 InitCache();
3145 disk_cache::Entry* entry;
3147 for (int i = 0; i < 2; ++i) {
3148 std::string key = base::StringPrintf("key%d", i);
3149 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3150 entry->Close();
3153 // Ping the oldest entry.
3154 cache_->OnExternalCacheHit("key0");
3156 TrimForTest(false);
3158 // Make sure the older key remains.
3159 EXPECT_EQ(1, cache_->GetEntryCount());
3160 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3161 entry->Close();
3164 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3165 SetCacheType(net::SHADER_CACHE);
3166 InitCache();
3168 disk_cache::Entry* entry;
3170 for (int i = 0; i < 2; ++i) {
3171 std::string key = base::StringPrintf("key%d", i);
3172 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3173 entry->Close();
3176 // Ping the oldest entry.
3177 cache_->OnExternalCacheHit("key0");
3179 TrimForTest(false);
3181 // Make sure the older key remains.
3182 EXPECT_EQ(1, cache_->GetEntryCount());
3183 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3184 entry->Close();
3187 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3188 SetCacheType(net::APP_CACHE);
3189 SetSimpleCacheMode();
3190 BackendShutdownWithPendingCreate(false);
3193 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3194 SetCacheType(net::APP_CACHE);
3195 SetSimpleCacheMode();
3196 BackendShutdownWithPendingFileIO(false);
3199 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3200 SetSimpleCacheMode();
3201 BackendBasics();
3204 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3205 SetCacheType(net::APP_CACHE);
3206 SetSimpleCacheMode();
3207 BackendBasics();
3210 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3211 SetSimpleCacheMode();
3212 BackendKeying();
3215 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3216 SetSimpleCacheMode();
3217 SetCacheType(net::APP_CACHE);
3218 BackendKeying();
3221 // MacOS has a default open file limit of 256 files, which is incompatible with
3222 // this simple cache test.
3223 #if defined(OS_MACOSX)
3224 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3225 #else
3226 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3227 #endif
3229 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3230 SetMaxSize(0x100000);
3231 SetSimpleCacheMode();
3232 BackendLoad();
3235 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3236 SetCacheType(net::APP_CACHE);
3237 SetSimpleCacheMode();
3238 SetMaxSize(0x100000);
3239 BackendLoad();
3242 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3243 SetSimpleCacheMode();
3244 BackendDoomRecent();
3247 // crbug.com/330926, crbug.com/370677
3248 TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
3249 SetSimpleCacheMode();
3250 BackendDoomBetween();
3253 TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3254 SetSimpleCacheMode();
3255 BackendDoomAll();
3258 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3259 SetCacheType(net::APP_CACHE);
3260 SetSimpleCacheMode();
3261 BackendDoomAll();
3264 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3265 SetSimpleCacheMode();
3266 InitCache();
3268 const char key[] = "the first key";
3269 disk_cache::Entry* entry = NULL;
3271 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3272 ASSERT_TRUE(entry != NULL);
3273 entry->Close();
3274 entry = NULL;
3276 // To make sure the file creation completed, we need to call open again so that
3277 // we block until the files have actually been created.
3278 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3279 ASSERT_TRUE(entry != NULL);
3280 entry->Close();
3281 entry = NULL;
3283 // Delete one of the files in the entry.
3284 base::FilePath to_delete_file = cache_path_.AppendASCII(
3285 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3286 EXPECT_TRUE(base::PathExists(to_delete_file));
3287 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3289 // Failing to open the entry should delete the rest of these files.
3290 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3292 // Confirm the rest of the files are gone.
3293 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3294 base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3295 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3296 EXPECT_FALSE(base::PathExists(should_be_gone_file));
3300 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3301 SetSimpleCacheMode();
3302 InitCache();
3304 const char key[] = "the first key";
3305 disk_cache::Entry* entry = NULL;
3307 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3308 disk_cache::Entry* null = NULL;
3309 ASSERT_NE(null, entry);
3310 entry->Close();
3311 entry = NULL;
3313 // To make sure the file creation completed, we need to call open again so that
3314 // we block until the files have actually been created.
3315 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3316 ASSERT_NE(null, entry);
3317 entry->Close();
3318 entry = NULL;
3320 // The entry is being closed on the Simple Cache worker pool.
3321 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
3322 base::RunLoop().RunUntilIdle();
3324 // Write an invalid header for stream 0 and stream 1.
3325 base::FilePath entry_file1_path = cache_path_.AppendASCII(
3326 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3328 disk_cache::SimpleFileHeader header;
3329 header.initial_magic_number = UINT64_C(0xbadf00d);
3330 EXPECT_EQ(
3331 implicit_cast<int>(sizeof(header)),
3332 base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3333 sizeof(header)));
3334 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3337 // Tests that the Simple Cache Backend fails to initialize with non-matching
3338 // file structure on disk.
3339 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3340 // Create a cache structure with the |BackendImpl|.
3341 InitCache();
3342 disk_cache::Entry* entry;
3343 const int kSize = 50;
3344 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3345 CacheTestFillBuffer(buffer->data(), kSize, false);
3346 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3347 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3348 entry->Close();
3349 cache_.reset();
3351 // Check that the |SimpleBackendImpl| refuses to initialize on top of this structure.
3352 base::Thread cache_thread("CacheThread");
3353 ASSERT_TRUE(cache_thread.StartWithOptions(
3354 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3355 disk_cache::SimpleBackendImpl* simple_cache =
3356 new disk_cache::SimpleBackendImpl(
3357 cache_path_, 0, net::DISK_CACHE, cache_thread.task_runner(), NULL);
3358 net::TestCompletionCallback cb;
3359 int rv = simple_cache->Init(cb.callback());
3360 EXPECT_NE(net::OK, cb.GetResult(rv));
3361 delete simple_cache;
3362 DisableIntegrityCheck();
3365 // Tests that the |BackendImpl| refuses to initialize on top of the files
3366 // generated by the Simple Cache Backend.
3367 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3368 // Create a cache structure with the |SimpleBackendImpl|.
3369 SetSimpleCacheMode();
3370 InitCache();
3371 disk_cache::Entry* entry;
3372 const int kSize = 50;
3373 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3374 CacheTestFillBuffer(buffer->data(), kSize, false);
3375 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3376 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3377 entry->Close();
3378 cache_.reset();
3380 // Check that the |BackendImpl| refuses to initialize on top of this structure.
3381 base::Thread cache_thread("CacheThread");
3382 ASSERT_TRUE(cache_thread.StartWithOptions(
3383 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3384 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3385 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL);
3386 cache->SetUnitTestMode();
3387 net::TestCompletionCallback cb;
3388 int rv = cache->Init(cb.callback());
3389 EXPECT_NE(net::OK, cb.GetResult(rv));
3390 delete cache;
3391 DisableIntegrityCheck();
3394 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3395 SetSimpleCacheMode();
3396 BackendFixEnumerators();
3399 // Tests basic functionality of the SimpleBackend implementation of the
3400 // enumeration API.
3401 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3402 SetSimpleCacheMode();
3403 InitCache();
3404 std::set<std::string> key_pool;
3405 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3407 // Check that enumeration returns all entries.
3408 std::set<std::string> keys_to_match(key_pool);
3409 scoped_ptr<TestIterator> iter = CreateIterator();
3410 size_t count = 0;
3411 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3412 iter.reset();
3413 EXPECT_EQ(key_pool.size(), count);
3414 EXPECT_TRUE(keys_to_match.empty());
3416 // Check that opening entries does not affect enumeration.
3417 keys_to_match = key_pool;
3418 iter = CreateIterator();
3419 count = 0;
3420 disk_cache::Entry* entry_opened_before;
3421 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3422 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3423 iter.get(),
3424 &keys_to_match,
3425 &count));
3427 disk_cache::Entry* entry_opened_middle;
3428 ASSERT_EQ(net::OK,
3429 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3430 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3431 iter.reset();
3432 entry_opened_before->Close();
3433 entry_opened_middle->Close();
3435 EXPECT_EQ(key_pool.size(), count);
3436 EXPECT_TRUE(keys_to_match.empty());
3439 // Tests that the enumerations are not affected by dooming an entry in the
3440 // middle.
3441 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3442 SetSimpleCacheMode();
3443 InitCache();
3444 std::set<std::string> key_pool;
3445 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3447 // Check that enumeration returns all entries but the doomed one.
3448 std::set<std::string> keys_to_match(key_pool);
3449 scoped_ptr<TestIterator> iter = CreateIterator();
3450 size_t count = 0;
3451 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3452 iter.get(),
3453 &keys_to_match,
3454 &count));
3456 std::string key_to_delete = *(keys_to_match.begin());
3457 DoomEntry(key_to_delete);
3458 keys_to_match.erase(key_to_delete);
3459 key_pool.erase(key_to_delete);
3460 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3461 iter.reset();
3463 EXPECT_EQ(key_pool.size(), count);
3464 EXPECT_TRUE(keys_to_match.empty());
3467 // Tests that enumerations are not affected by corrupt files.
3468 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3469 SetSimpleCacheMode();
3470 InitCache();
3471 std::set<std::string> key_pool;
3472 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3474 // Create a corrupt entry. The write/read sequence ensures that the entry will
3475 // have been created before corrupting the platform files, in the case of
3476 // optimistic operations.
3477 const std::string key = "the key";
3478 disk_cache::Entry* corrupted_entry;
3480 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3481 ASSERT_TRUE(corrupted_entry);
3482 const int kSize = 50;
3483 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3484 CacheTestFillBuffer(buffer->data(), kSize, false);
3485 ASSERT_EQ(kSize,
3486 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3487 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3488 corrupted_entry->Close();
3490 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3491 key, cache_path_));
3492 EXPECT_EQ(key_pool.size() + 1,
3493 implicit_cast<size_t>(cache_->GetEntryCount()));
3495 // Check that enumeration returns all entries but the corrupt one.
3496 std::set<std::string> keys_to_match(key_pool);
3497 scoped_ptr<TestIterator> iter = CreateIterator();
3498 size_t count = 0;
3499 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3500 iter.reset();
3502 EXPECT_EQ(key_pool.size(), count);
3503 EXPECT_TRUE(keys_to_match.empty());
3506 // Tests that enumerations don't leak memory when the backend is destructed
3507 // mid-enumeration.
3508 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
3509 SetSimpleCacheMode();
3510 InitCache();
3511 std::set<std::string> key_pool;
3512 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3514 scoped_ptr<TestIterator> iter = CreateIterator();
3515 disk_cache::Entry* entry = NULL;
3516 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
3517 EXPECT_TRUE(entry);
3518 disk_cache::ScopedEntryPtr entry_closer(entry);
3520 cache_.reset();
3521 // This test passes if we don't leak memory.
3524 // Tests that a SimpleCache doesn't crash when files are deleted very quickly
3525 // after closing.
3526 // NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
3527 TEST_F(DiskCacheBackendTest, SimpleCacheDeleteQuickly) {
3528 SetSimpleCacheMode();
3529 for (int i = 0; i < 100; ++i) {
3530 InitCache();
3531 cache_.reset();
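// Deleting the cache directory immediately after the backend goes away may
// race with files still being closed on the worker pool; this must not crash.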
3532 EXPECT_TRUE(CleanupCacheDir());