// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/basictypes.h"
#include "base/file_util.h"
#include "base/metrics/field_trial.h"
#include "base/port.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
#include "net/base/cache_type.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/entry_impl.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/mapped_file.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/memory/mem_backend_impl.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/disk_cache/tracing/tracing_cache_backend.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_WIN)
#include "base/win/scoped_handle.h"
#endif

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;

namespace {

const char kExistingEntryKey[] = "existing entry key";

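// Creates a blockfile cache backend in |cache_path|, running on
// |cache_thread|, that already contains a single entry keyed by
// kExistingEntryKey. Returns NULL if any step fails.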
scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
    const base::Thread& cache_thread,
    base::FilePath& cache_path) {
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path, cache_thread.message_loop_proxy(), NULL));
  int rv = cache->Init(cb.callback());
  if (cb.GetResult(rv) != net::OK)
    return scoped_ptr<disk_cache::BackendImpl>();

  disk_cache::Entry* entry = NULL;
  rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
  if (cb.GetResult(rv) != net::OK)
    return scoped_ptr<disk_cache::BackendImpl>();
  entry->Close();

  return cache.Pass();
}

}  // namespace

// Tests that can run with different types of caches.
class DiskCacheBackendTest : public DiskCacheTestWithCache {
 protected:
  // Some utility methods:

  // Perform IO operations on the cache until there is pending IO.
  int GeneratePendingIO(net::TestCompletionCallback* cb);

  // Adds 5 sparse entries. |doomed_start| and |doomed_end|, if not NULL,
  // will be filled with times used by DoomEntriesSince and DoomEntriesBetween.
  // There are 4 entries after doomed_start and 2 after doomed_end.
  void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);

  bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
  bool EnumerateAndMatchKeys(int max_to_open,
                             void** iter,
                             std::set<std::string>* keys_to_match,
                             size_t* count);

  // Actual tests:
  void BackendBasics();
  void BackendKeying();
  void BackendShutdownWithPendingFileIO(bool fast);
  void BackendShutdownWithPendingIO(bool fast);
  void BackendShutdownWithPendingCreate(bool fast);
  void BackendSetSize();
  void BackendLoad();
  void BackendChain();
  void BackendValidEntry();
  void BackendInvalidEntry();
  void BackendInvalidEntryRead();
  void BackendInvalidEntryWithLoad();
  void BackendTrimInvalidEntry();
  void BackendTrimInvalidEntry2();
  void BackendEnumerations();
  void BackendEnumerations2();
  void BackendInvalidEntryEnumeration();
  void BackendFixEnumerators();
  void BackendDoomRecent();
  void BackendDoomBetween();
  void BackendTransaction(const std::string& name, int num_entries, bool load);
  void BackendRecoverInsert();
  void BackendRecoverRemove();
  void BackendRecoverWithEviction();
  void BackendInvalidEntry2();
  void BackendInvalidEntry3();
  void BackendInvalidEntry7();
  void BackendInvalidEntry8();
  void BackendInvalidEntry9(bool eviction);
  void BackendInvalidEntry10(bool eviction);
  void BackendInvalidEntry11(bool eviction);
  void BackendTrimInvalidEntry12();
  void BackendDoomAll();
  void BackendDoomAll2();
  void BackendInvalidRankings();
  void BackendInvalidRankings2();
  void BackendDisable();
  void BackendDisable2();
  void BackendDisable3();
  void BackendDisable4();
  void TracingBackendBasics();
};

int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
  if (!use_current_thread_) {
    ADD_FAILURE();
    return net::ERR_FAILED;
  }

  disk_cache::Entry* entry;
  int rv = cache_->CreateEntry("some key", &entry, cb->callback());
  if (cb->GetResult(rv) != net::OK)
    return net::ERR_CACHE_CREATE_FAILURE;

  const int kSize = 25000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
    // We are using the current thread as the cache thread because we want to
    // be able to call this method directly and make sure that it is the OS
    // (instead of us switching threads) that returns IO pending.
    if (!simple_cache_mode_) {
      rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
          0, i, buffer.get(), kSize, cb->callback(), false);
    } else {
      rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
    }

    if (rv == net::ERR_IO_PENDING)
      break;
    if (rv != kSize)
      rv = net::ERR_FAILED;
  }

  // Don't call Close() to avoid going through the queue or we'll deadlock
  // waiting for the operation to finish.
  if (!simple_cache_mode_)
    static_cast<disk_cache::EntryImpl*>(entry)->Release();
  else
    entry->Close();

  return rv;
}

void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
                                           base::Time* doomed_end) {
  InitCache();

  const int kSize = 50;
  // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
  const int kOffset = 10 + 1024 * 1024;

  disk_cache::Entry* entry0 = NULL;
  disk_cache::Entry* entry1 = NULL;
  disk_cache::Entry* entry2 = NULL;

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
  ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
  entry0->Close();

  FlushQueueForTest();
  AddDelay();
  if (doomed_start)
    *doomed_start = base::Time::Now();

  // Order in rankings list:
  // first_part1, first_part2, second_part1, second_part2
  ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
  ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
  entry1->Close();

  ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
  ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
  entry2->Close();

  FlushQueueForTest();
  AddDelay();
  if (doomed_end)
    *doomed_end = base::Time::Now();

  // Order in rankings list:
  // third_part1, fourth_part1, third_part2, fourth_part2
  disk_cache::Entry* entry3 = NULL;
  disk_cache::Entry* entry4 = NULL;
  ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
  ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
  ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
  entry3->Close();
  entry4->Close();

  FlushQueueForTest();
  AddDelay();
}

// Creates entries based on random keys. Stores these keys in |key_pool|.
bool DiskCacheBackendTest::CreateSetOfRandomEntries(
    std::set<std::string>* key_pool) {
  const int kNumEntries = 10;

  for (int i = 0; i < kNumEntries; ++i) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    if (CreateEntry(key, &entry) != net::OK)
      return false;
    key_pool->insert(key);
    entry->Close();
  }
  return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
}

// Performs iteration over the backend and checks that the keys of entries
// opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
// will be opened, if it is positive. Otherwise, iteration will continue until
// OpenNextEntry stops returning net::OK.
bool DiskCacheBackendTest::EnumerateAndMatchKeys(
    int max_to_open,
    void** iter,
    std::set<std::string>* keys_to_match,
    size_t* count) {
  disk_cache::Entry* entry;

  while (OpenNextEntry(iter, &entry) == net::OK) {
    if (!entry)
      return false;
    EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
    entry->Close();
    ++(*count);
    if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
      break;
  }

  return true;
}

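// Exercises basic Create/Open/Doom operations and verifies the entry count
// reported by the backend at each step.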
void DiskCacheBackendTest::BackendBasics() {
  InitCache();
  disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
  ASSERT_TRUE(NULL != entry1);
  ASSERT_TRUE(NULL != entry2);
  EXPECT_EQ(2, cache_->GetEntryCount());

  disk_cache::Entry* entry3 = NULL;
  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
  ASSERT_TRUE(NULL != entry3);
  EXPECT_TRUE(entry2 == entry3);
  EXPECT_EQ(2, cache_->GetEntryCount());

  EXPECT_EQ(net::OK, DoomEntry("some other key"));
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry1->Close();
  entry2->Close();
  entry3->Close();

  EXPECT_EQ(net::OK, DoomEntry("the first key"));
  EXPECT_EQ(0, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
  entry1->Doom();
  entry1->Close();
  EXPECT_EQ(net::OK, DoomEntry("some other key"));
  EXPECT_EQ(0, cache_->GetEntryCount());
  entry2->Close();
}

TEST_F(DiskCacheBackendTest, Basics) {
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
  SetNewEviction();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
  SetMemoryOnlyMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, AppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
  SetCacheType(net::SHADER_CACHE);
  BackendBasics();
}

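// Checks that keys are case sensitive, that they can be read from buffers at
// different offsets, and that long keys (stored in a block file or in an
// external file) are supported.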
void DiskCacheBackendTest::BackendKeying() {
  InitCache();
  const char* kName1 = "the first key";
  const char* kName2 = "the first Key";
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));

  ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
  EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
  entry2->Close();

  char buffer[30];
  base::strlcpy(buffer, kName1, arraysize(buffer));
  ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
  ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
  ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
  EXPECT_TRUE(entry1 == entry2);
  entry2->Close();

  // Now verify long keys.
  char buffer2[20000];
  memset(buffer2, 's', sizeof(buffer2));
  buffer2[1023] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
  entry2->Close();

  buffer2[1023] = 'g';
  buffer2[19999] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
  entry2->Close();
  entry1->Close();
}

TEST_F(DiskCacheBackendTest, Keying) {
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
  SetNewEviction();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
  SetMemoryOnlyMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, AppCacheKeying) {
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
  SetCacheType(net::SHADER_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheTest, CreateBackend) {
  net::TestCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());
    base::Thread cache_thread("CacheThread");
    ASSERT_TRUE(cache_thread.StartWithOptions(
        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

    // Test the private factory method(s).
    scoped_ptr<disk_cache::Backend> cache;
    cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
    ASSERT_TRUE(cache.get());
    cache.reset();

    // Now test the public API.
    int rv =
        disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                       net::CACHE_BACKEND_DEFAULT,
                                       cache_path_,
                                       0,
                                       false,
                                       cache_thread.message_loop_proxy().get(),
                                       NULL,
                                       &cache,
                                       cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    ASSERT_TRUE(cache.get());
    cache.reset();

    rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
                                        net::CACHE_BACKEND_DEFAULT,
                                        base::FilePath(), 0,
                                        false, NULL, NULL, &cache,
                                        cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    ASSERT_TRUE(cache.get());
    cache.reset();
  }

  base::MessageLoop::current()->RunUntilIdle();
}

// Tests that |BackendImpl| fails to initialize with a missing file.
TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  base::FilePath filename = cache_path_.AppendASCII("data_1");
  base::DeleteFile(filename, false);
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path_, cache_thread.message_loop_proxy().get(), NULL));
  int rv = cache->Init(cb.callback());
  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);

  cache.reset();
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, ExternalFiles) {
  InitCache();
  // First, let's create a file in the cache folder.
  base::FilePath filename = cache_path_.AppendASCII("f_000001");

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize));

  // Now let's create a file with the cache.
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
  entry->Close();

  // And verify that the first file is still there.
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
}

// Tests that we deal with file-level pending operations at destruction time.
void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
  ASSERT_TRUE(CleanupCacheDir());
  uint32 flags = disk_cache::kNoBuffering;
  if (!fast)
    flags |= disk_cache::kNoRandom;

  UseCurrentThread();
  CreateBackend(flags, NULL);

  net::TestCompletionCallback cb;
  int rv = GeneratePendingIO(&cb);

  // The cache destructor will see one pending operation here.
  cache_.reset();

  if (rv == net::ERR_IO_PENDING) {
    if (fast || simple_cache_mode_)
      EXPECT_FALSE(cb.have_result());
    else
      EXPECT_TRUE(cb.have_result());
  }

  base::MessageLoop::current()->RunUntilIdle();

#if !defined(OS_IOS)
  // Wait for the actual operation to complete, or we'll keep a file handle
  // that may cause issues later. Note that on iOS systems even though this
  // test uses a single thread, the actual IO is posted to a worker thread and
  // the cache destructor breaks the link to reach cb when the operation
  // completes.
  rv = cb.GetResult(rv);
#endif
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
  BackendShutdownWithPendingFileIO(false);
}

// Here and below, tests that simulate crashes are not compiled in LeakSanitizer
// builds because they contain a lot of intentional memory leaks.
// The wrapper scripts used to run tests under Valgrind Memcheck will also
// disable these tests. See:
// tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingFileIO(true);
}
#endif

// See crbug.com/330074
#if !defined(OS_IOS)
// Tests that one cache instance is not affected by another one going away.
TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
  base::ScopedTempDir store;
  ASSERT_TRUE(store.CreateUniqueTempDir());

  net::TestCompletionCallback cb;
  scoped_ptr<disk_cache::Backend> extra_cache;
  int rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, store.path(), 0,
      false, base::MessageLoopProxy::current().get(), NULL,
      &extra_cache, cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  ASSERT_TRUE(extra_cache.get() != NULL);

  ASSERT_TRUE(CleanupCacheDir());
  SetNewEviction();  // Match the expected behavior for integrity verification.
  UseCurrentThread();

  CreateBackend(disk_cache::kNoBuffering, NULL);
  rv = GeneratePendingIO(&cb);

  // cache_ has a pending operation, and extra_cache will go away.
  extra_cache.reset();

  if (rv == net::ERR_IO_PENDING)
    EXPECT_FALSE(cb.have_result());

  base::MessageLoop::current()->RunUntilIdle();

  // Wait for the actual operation to complete, or we'll keep a file handle
  // that may cause issues later.
  rv = cb.GetResult(rv);
}
#endif

// Tests that we deal with background-thread pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
  net::TestCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());
    base::Thread cache_thread("CacheThread");
    ASSERT_TRUE(cache_thread.StartWithOptions(
        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

    uint32 flags = disk_cache::kNoBuffering;
    if (!fast)
      flags |= disk_cache::kNoRandom;

    CreateBackend(flags, &cache_thread);

    disk_cache::Entry* entry;
    int rv = cache_->CreateEntry("some key", &entry, cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));

    entry->Close();

    // The cache destructor will see one pending operation here.
    cache_.reset();
  }

  base::MessageLoop::current()->RunUntilIdle();
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
  BackendShutdownWithPendingIO(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingIO(true);
}
#endif

// Tests that we deal with create-type pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
  net::TestCompletionCallback cb;

  {
    ASSERT_TRUE(CleanupCacheDir());
    base::Thread cache_thread("CacheThread");
    ASSERT_TRUE(cache_thread.StartWithOptions(
        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

    disk_cache::BackendFlags flags =
        fast ? disk_cache::kNone : disk_cache::kNoRandom;
    CreateBackend(flags, &cache_thread);

    disk_cache::Entry* entry;
    int rv = cache_->CreateEntry("some key", &entry, cb.callback());
    ASSERT_EQ(net::ERR_IO_PENDING, rv);

    cache_.reset();
    EXPECT_FALSE(cb.have_result());
  }

  base::MessageLoop::current()->RunUntilIdle();
}

TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
  BackendShutdownWithPendingCreate(false);
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking an entry from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we
  // don't force new eviction.
  SetNewEviction();
  BackendShutdownWithPendingCreate(true);
}
#endif

TEST_F(DiskCacheTest, TruncatedIndex) {
  ASSERT_TRUE(CleanupCacheDir());
  base::FilePath index = cache_path_.AppendASCII("index");
  ASSERT_EQ(5, base::WriteFile(index, "hello", 5));

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::Backend> backend;
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     cache_path_,
                                     0,
                                     false,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &backend,
                                     cb.callback());
  ASSERT_NE(net::OK, cb.GetResult(rv));

  ASSERT_FALSE(backend);
}

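// Verifies that the maximum cache size is honored: writes above the limit
// fail, and once the cache fills up, closing a new entry triggers eviction of
// the oldest one.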
void DiskCacheBackendTest::BackendSetSize() {
  const int cache_size = 0x10000;  // 64 kB
  SetMaxSize(cache_size);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
  memset(buffer->data(), 0, cache_size);
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
      << "normal file";

  EXPECT_EQ(net::ERR_FAILED,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
      << "file size above the limit";

  // By doubling the total size, we make this file cacheable.
  SetMaxSize(cache_size * 2);
  EXPECT_EQ(cache_size / 5,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));

  // Let's fill up the cache.
  SetMaxSize(cache_size * 10);
  EXPECT_EQ(cache_size * 3 / 4,
            WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
  entry->Close();
  FlushQueueForTest();

  SetMaxSize(cache_size);

  // The cache is 95% full.

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));

  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
  EXPECT_EQ(cache_size / 10,
            WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
  entry2->Close();  // This will trigger the cache trim.

  EXPECT_NE(net::OK, OpenEntry(first, &entry2));

  FlushQueueForTest();  // Make sure that we are done trimming the cache.
  FlushQueueForTest();  // We may have posted two tasks to evict stuff.

  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, SetSize) {
  BackendSetSize();
}

TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
  SetNewEviction();
  BackendSetSize();
}

TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
  SetMemoryOnlyMode();
  BackendSetSize();
}

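// Creates 100 entries, reopens them in random order and dooms them all,
// putting some load on the backend.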
void DiskCacheBackendTest::BackendLoad() {
  InitCache();
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  disk_cache::Entry* entries[100];
  for (int i = 0; i < 100; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(100, cache_->GetEntryCount());

  for (int i = 0; i < 100; i++) {
    int source1 = rand() % 100;
    int source2 = rand() % 100;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  for (int i = 0; i < 100; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
    EXPECT_TRUE(entry == entries[i]);
    entry->Close();
    entries[i]->Doom();
    entries[i]->Close();
  }
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
  SetNewEviction();
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  SetMaxSize(0x100000);
  SetMemoryOnlyMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, AppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
  SetCacheType(net::SHADER_CACHE);
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  BackendLoad();
}

// Tests the chaining of an entry to the current head.
void DiskCacheBackendTest::BackendChain() {
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  InitCache();

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, Chain) {
  BackendChain();
}

TEST_F(DiskCacheBackendTest, NewEvictionChain) {
  SetNewEviction();
  BackendChain();
}

TEST_F(DiskCacheBackendTest, AppCacheChain) {
  SetCacheType(net::APP_CACHE);
  BackendChain();
}

TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
  SetCacheType(net::SHADER_CACHE);
  BackendChain();
}

TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
  SetNewEviction();
  InitCache();

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(name, &entry));
    entry->Close();
    if (i < 90) {
      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
      ASSERT_EQ(net::OK, OpenEntry(name, &entry));
      entry->Close();
    }
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  TrimForTest(false);
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
  entry->Close();
}

// Before looking for invalid entries, let's check a valid entry.
void DiskCacheBackendTest::BackendValidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
  entry->Close();
  SimulateCrash();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
  entry->Close();
  EXPECT_STREQ(buffer1->data(), buffer2->data());
}

TEST_F(DiskCacheBackendTest, ValidEntry) {
  BackendValidEntry();
}

TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
  SetNewEviction();
  BackendValidEntry();
}

// The same logic as the previous test (ValidEntry), but this time force the
// entry to be invalid, simulating a crash in the middle.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntry() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  SimulateCrash();

  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache_->GetEntryCount());
}

#if !defined(LEAK_SANITIZER)
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
  SetNewEviction();
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntry();
}

// Almost the same test, but this time crash the cache after reading an entry.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryRead() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  SimulateCrash();

  if (type_ == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(1, cache_->GetEntryCount());
    entry->Close();
  } else {
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  SetNewEviction();
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  SetNewEviction();
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash.
  SimulateCrash();

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache_->GetEntryCount());
  SetMaxSize(kSize);
  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20 ms, we have one entry in the
  // cache; if it took more than that, we posted a task and we'll delete the
  // second entry too.
  base::MessageLoop::current()->RunUntilIdle();

  // This may not be thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  EXPECT_NE(net::OK, OpenEntry(first, &entry));
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
  BackendTrimInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
  SetNewEviction();
  BackendTrimInvalidEntry();
}

1157 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1158 SetMask(0xf); // 16-entry table.
1160 const int kSize = 0x3000; // 12 kB
1161 SetMaxSize(kSize * 40);
1162 InitCache();
1164 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1165 memset(buffer->data(), 0, kSize);
1166 disk_cache::Entry* entry;
1168 // Writing 32 entries to this cache chains most of them.
1169 for (int i = 0; i < 32; i++) {
1170 std::string key(base::StringPrintf("some key %d", i));
1171 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1172 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1173 entry->Close();
1174 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1175 // Note that we are not closing the entries.
1178 // Simulate a crash.
1179 SimulateCrash();
1181 ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
1182 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1184 FlushQueueForTest();
1185 EXPECT_EQ(33, cache_->GetEntryCount());
1186 SetMaxSize(kSize);
1188 // For the new eviction code, all corrupt entries are on the second list so
1189 // they are not going away that easy.
1190 if (new_eviction_) {
1191 EXPECT_EQ(net::OK, DoomAllEntries());
1194 entry->Close(); // Trim the cache.
1195 FlushQueueForTest();
1197 // We may abort the eviction before cleaning up everything.
1198 base::MessageLoop::current()->RunUntilIdle();
1199 FlushQueueForTest();
1200 // If it's not clear enough: we may still have eviction tasks running at this
1201 // time, so the number of entries is changing while we read it.
1202 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1203 EXPECT_GE(30, cache_->GetEntryCount());
1204 ANNOTATE_IGNORE_READS_AND_WRITES_END();
1207 // We'll be leaking memory from this test.
1208 TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
1209 BackendTrimInvalidEntry2();
1212 // We'll be leaking memory from this test.
1213 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
1214 SetNewEviction();
1215 BackendTrimInvalidEntry2();
1217 #endif // !defined(LEAK_SANITIZER)
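// Enumerates the whole cache twice and checks that enumeration itself does not
// alter the last-used or last-modified times of the entries.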
void DiskCacheBackendTest::BackendEnumerations() {
  InitCache();
  Time initial = Time::Now();

  const int kNumEntries = 100;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
  Time final = Time::Now();

  disk_cache::Entry* entry;
  void* iter = NULL;
  int count = 0;
  Time last_modified[kNumEntries];
  Time last_used[kNumEntries];
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      last_modified[count] = entry->GetLastModified();
      last_used[count] = entry->GetLastUsed();
      EXPECT_TRUE(initial <= last_modified[count]);
      EXPECT_TRUE(final >= last_modified[count]);
    }

    entry->Close();
    count++;
  }
  EXPECT_EQ(kNumEntries, count);

  iter = NULL;
  count = 0;
  // The previous enumeration should not have changed the timestamps.
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
      EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
    }
    entry->Close();
    count++;
  }
  EXPECT_EQ(kNumEntries, count);
}

TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  SetNewEviction();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
}

// Verifies enumerations while entries are open.
void DiskCacheBackendTest::BackendEnumerations2() {
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  entry2->Close();
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();
  ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), second);

  // Two entries and the iterator pointing at "first".
  entry1->Close();
  entry2->Close();

  // The iterator should still be valid, so we should not crash.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), first);
  entry2->Close();
  cache_->EndEnumeration(&iter);

  // Modify the oldest entry and get the newest element.
  ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
  EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  if (type_ == net::APP_CACHE) {
    // The list is not updated.
    EXPECT_EQ(entry2->GetKey(), second);
  } else {
    EXPECT_EQ(entry2->GetKey(), first);
  }

  entry1->Close();
  entry2->Close();
  cache_->EndEnumeration(&iter);
}

TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  SetNewEviction();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
  SetMemoryOnlyMode();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
}

// Verify that ReadData calls do not update the LRU cache
// when using the SHADER_CACHE type.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));

  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  entry2->Close();

  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
  entry1->Close();

  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), second);
  entry2->Close();
  cache_->EndEnumeration(&iter);
}

#if !defined(LEAK_SANITIZER)
// Verify handling of invalid entries while doing enumerations.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry *entry, *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
  entry1->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));

  std::string key2("Another key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
  entry2->Close();
  ASSERT_EQ(2, cache_->GetEntryCount());

  SimulateCrash();

  void* iter = NULL;
  int count = 0;
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    EXPECT_EQ(key2, entry->GetKey());
    entry->Close();
    count++;
  }
  EXPECT_EQ(1, count);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  SetNewEviction();
  BackendInvalidEntryEnumeration();
}
#endif  // !defined(LEAK_SANITIZER)

// Tests that if for some reason entries are modified close to existing cache
// iterators, we don't generate fatal errors or reset the cache.
void DiskCacheBackendTest::BackendFixEnumerators() {
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  disk_cache::Entry *entry1, *entry2;
  void* iter1 = NULL;
  void* iter2 = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    if (entry1)
      entry1->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
    ASSERT_TRUE(NULL != entry1);

    ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
    ASSERT_TRUE(NULL != entry2);
    entry2->Close();
  }

  // Messing with entry1 will modify entry2->next.
  entry1->Doom();
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
  entry1->Close();
  entry2->Close();

  // And the second iterator should keep working.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);
  entry2->Close();

  cache_->EndEnumeration(&iter1);
  cache_->EndEnumeration(&iter2);
}

TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
}

TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  SetNewEviction();
  BackendFixEnumerators();
}

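// Creates two groups of entries separated by a delay and checks that
// DoomEntriesSince only removes the entries created after the given time.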
void DiskCacheBackendTest::BackendDoomRecent() {
  InitCache();

  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomEntriesSince(final));
  ASSERT_EQ(4, cache_->GetEntryCount());

  EXPECT_EQ(net::OK, DoomEntriesSince(middle));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("second", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  SetNewEviction();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why the expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

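// Creates entries at three different times and checks that DoomEntriesBetween
// only removes the entries whose timestamps fall within the given range.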
void DiskCacheBackendTest::BackendDoomBetween() {
  InitCache();

  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_start = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_end = Time::Now();
  AddDelay();

  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  entry->Close();

  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
  ASSERT_EQ(1, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("first", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  SetNewEviction();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
}

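// Loads the canned cache |name|, which was saved part-way through an
// operation, and verifies that the backend recovers to a consistent state
// with the expected number of entries.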
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries, bool load) {
  success_ = false;
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  uint32 mask;
  if (load) {
    mask = 0xf;
    SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    SetMaxSize(0);
  }
  SetMask(mask);

  InitCache();
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  cache_.reset();
  cache_impl_ = NULL;

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
  success_ = true;
}

void DiskCacheBackendTest::BackendRecoverInsert() {
  // Tests with an empty cache.
  BackendTransaction("insert_empty1", 0, false);
  ASSERT_TRUE(success_) << "insert_empty1";
  BackendTransaction("insert_empty2", 0, false);
  ASSERT_TRUE(success_) << "insert_empty2";
  BackendTransaction("insert_empty3", 0, false);
  ASSERT_TRUE(success_) << "insert_empty3";

  // Tests with one entry on the cache.
  BackendTransaction("insert_one1", 1, false);
  ASSERT_TRUE(success_) << "insert_one1";
  BackendTransaction("insert_one2", 1, false);
  ASSERT_TRUE(success_) << "insert_one2";
  BackendTransaction("insert_one3", 1, false);
  ASSERT_TRUE(success_) << "insert_one3";

  // Tests with one hundred entries on the cache, tiny index.
  BackendTransaction("insert_load1", 100, true);
  ASSERT_TRUE(success_) << "insert_load1";
  BackendTransaction("insert_load2", 100, true);
  ASSERT_TRUE(success_) << "insert_load2";
}

TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  SetNewEviction();
  BackendRecoverInsert();
}

void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";
}

TEST_F(DiskCacheBackendTest, RecoverRemove) {
  BackendRecoverRemove();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
  SetNewEviction();
  BackendRecoverRemove();
}

void DiskCacheBackendTest::BackendRecoverWithEviction() {
  success_ = false;
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  SetMask(0xf);
  SetMaxSize(0x1000);

  // We should not crash here.
  InitCache();
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  SetNewEviction();
  BackendRecoverWithEviction();
}

// Tests that the |BackendImpl| fails to start with the wrong cache version.
TEST_F(DiskCacheTest, WrongVersion) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path_, cache_thread.message_loop_proxy().get(), NULL));
  int rv = cache->Init(cb.callback());
  ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
}

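// An entropy provider that always returns 0.5, making field trial group
// assignment deterministic for the SimpleCacheTrial tests below.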
class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
 public:
  virtual ~BadEntropyProvider() {}

  virtual double GetEntropyForTrial(const std::string& trial_name,
                                    uint32 randomization_seed) const OVERRIDE {
    return 0.5;
  }
};

// Tests that the disk cache successfully joins the control group, dropping the
// existing cache in favour of a new empty cache.
TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());
  cache.reset();

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");
  net::TestCompletionCallback cb;
  scoped_ptr<disk_cache::Backend> base_cache;
  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     cache_path_,
                                     0,
                                     true,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &base_cache,
                                     cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  EXPECT_EQ(0, base_cache->GetEntryCount());
}

1871 // Tests that the disk cache can restart in the control group preserving
1872 // existing entries.
1873 TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
1874 // Instantiate the SimpleCacheTrial, forcing this run into the
1875 // ExperimentControl group.
1876 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1877 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1878 "ExperimentControl");
1880 base::Thread cache_thread("CacheThread");
1881 ASSERT_TRUE(cache_thread.StartWithOptions(
1882 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1884 scoped_ptr<disk_cache::BackendImpl> cache =
1885 CreateExistingEntryCache(cache_thread, cache_path_);
1886 ASSERT_TRUE(cache.get());
1888 net::TestCompletionCallback cb;
1890 const int kRestartCount = 5;
1891 for (int i = 0; i < kRestartCount; ++i) {
1892 cache.reset(new disk_cache::BackendImpl(
1893 cache_path_, cache_thread.message_loop_proxy(), NULL));
1894 int rv = cache->Init(cb.callback());
1895 ASSERT_EQ(net::OK, cb.GetResult(rv));
1896 EXPECT_EQ(1, cache->GetEntryCount());
1898 disk_cache::Entry* entry = NULL;
1899 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1900 EXPECT_EQ(net::OK, cb.GetResult(rv));
1901 EXPECT_TRUE(entry);
1902 entry->Close();
1906 // Tests that the disk cache can leave the control group preserving existing
1907 // entries.
1908 TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
1909 base::Thread cache_thread("CacheThread");
1910 ASSERT_TRUE(cache_thread.StartWithOptions(
1911 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1914 // Instantiate the SimpleCacheTrial, forcing this run into the
1915 // ExperimentControl group.
1916 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1917 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1918 "ExperimentControl");
1920 scoped_ptr<disk_cache::BackendImpl> cache =
1921 CreateExistingEntryCache(cache_thread, cache_path_);
1922 ASSERT_TRUE(cache.get());
1925 // Instantiate the SimpleCacheTrial, forcing this run into the
1926 // ExperimentNo group.
1927 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1928 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1929 net::TestCompletionCallback cb;
1931 const int kRestartCount = 5;
1932 for (int i = 0; i < kRestartCount; ++i) {
1933 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1934 cache_path_, cache_thread.message_loop_proxy(), NULL));
1935 int rv = cache->Init(cb.callback());
1936 ASSERT_EQ(net::OK, cb.GetResult(rv));
1937 EXPECT_EQ(1, cache->GetEntryCount());
1939 disk_cache::Entry* entry = NULL;
1940 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1941 EXPECT_EQ(net::OK, cb.GetResult(rv));
1942 EXPECT_TRUE(entry);
1943 entry->Close();
1947 // Tests that the cache is properly restarted on recovery error.
1948 TEST_F(DiskCacheBackendTest, DeleteOld) {
1949 ASSERT_TRUE(CopyTestCache("wrong_version"));
1950 SetNewEviction();
1951 base::Thread cache_thread("CacheThread");
1952 ASSERT_TRUE(cache_thread.StartWithOptions(
1953 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1955 net::TestCompletionCallback cb;
1956 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
1957 base::FilePath path(cache_path_);
1958 int rv =
1959 disk_cache::CreateCacheBackend(net::DISK_CACHE,
1960 net::CACHE_BACKEND_BLOCKFILE,
1961 path,
1963 true,
1964 cache_thread.message_loop_proxy().get(),
1965 NULL,
1966 &cache_,
1967 cb.callback());
1968 path.clear(); // Make sure path was captured by the previous call.
1969 ASSERT_EQ(net::OK, cb.GetResult(rv));
1970 base::ThreadRestrictions::SetIOAllowed(prev);
1971 cache_.reset();
1972 EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
1975 // We want to be able to deal with messed up entries on disk.
1976 void DiskCacheBackendTest::BackendInvalidEntry2() {
1977 ASSERT_TRUE(CopyTestCache("bad_entry"));
1978 DisableFirstCleanup();
1979 InitCache();
1981 disk_cache::Entry *entry1, *entry2;
1982 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
1983 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
1984 entry1->Close();
1986 // CheckCacheIntegrity will fail at this point.
1987 DisableIntegrityCheck();
1990 TEST_F(DiskCacheBackendTest, InvalidEntry2) {
1991 BackendInvalidEntry2();
1994 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
1995 SetNewEviction();
1996 BackendInvalidEntry2();
1999 // Tests that we don't crash or hang when enumerating this cache.
2000 void DiskCacheBackendTest::BackendInvalidEntry3() {
2001 SetMask(0x1); // 2-entry table.
2002 SetMaxSize(0x3000); // 12 kB.
2003 DisableFirstCleanup();
2004 InitCache();
2006 disk_cache::Entry* entry;
2007 void* iter = NULL;
2008 while (OpenNextEntry(&iter, &entry) == net::OK) {
2009 entry->Close();
2013 TEST_F(DiskCacheBackendTest, InvalidEntry3) {
2014 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2015 BackendInvalidEntry3();
2018 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
2019 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2020 SetNewEviction();
2021 BackendInvalidEntry3();
2022 DisableIntegrityCheck();
2025 // Test that we handle a dirty entry on the LRU list, already replaced with
2026 // the same key, and with hash collisions.
2027 TEST_F(DiskCacheBackendTest, InvalidEntry4) {
2028 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2029 SetMask(0x1); // 2-entry table.
2030 SetMaxSize(0x3000); // 12 kB.
2031 DisableFirstCleanup();
2032 InitCache();
2034 TrimForTest(false);
2037 // Test that we handle a dirty entry on the deleted list, already replaced with
2038 // the same key, and with hash collisions.
2039 TEST_F(DiskCacheBackendTest, InvalidEntry5) {
2040 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2041 SetNewEviction();
2042 SetMask(0x1); // 2-entry table.
2043 SetMaxSize(0x3000); // 12 kB.
2044 DisableFirstCleanup();
2045 InitCache();
2047 TrimDeletedListForTest(false);
2050 TEST_F(DiskCacheBackendTest, InvalidEntry6) {
2051 ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2052 SetMask(0x1); // 2-entry table.
2053 SetMaxSize(0x3000); // 12 kB.
2054 DisableFirstCleanup();
2055 InitCache();
2057 // There is a dirty entry (but marked as clean) at the end, pointing to a
2058 // deleted entry through the hash collision list. We should not re-insert the
2059 // deleted entry into the index table.
2061 TrimForTest(false);
2062 // The cache should be clean (as detected by CheckCacheIntegrity).
2065 // Tests that we don't hang when there is a loop on the hash collision list.
2066 // The test cache could be a result of bug 69135.
2067 TEST_F(DiskCacheBackendTest, BadNextEntry1) {
2068 ASSERT_TRUE(CopyTestCache("list_loop2"));
2069 SetMask(0x1); // 2-entry table.
2070 SetMaxSize(0x3000); // 12 kB.
2071 DisableFirstCleanup();
2072 InitCache();
2074 // The second entry points at itself, and the first entry is not accessible
2075 // through the index, but it is at the head of the LRU.
2077 disk_cache::Entry* entry;
2078 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
2079 entry->Close();
2081 TrimForTest(false);
2082 TrimForTest(false);
2083 ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
2084 entry->Close();
2085 EXPECT_EQ(1, cache_->GetEntryCount());
2088 // Tests that we don't hang when there is a loop on the hash collision list.
2089 // The test cache could be a result of bug 69135.
2090 TEST_F(DiskCacheBackendTest, BadNextEntry2) {
2091 ASSERT_TRUE(CopyTestCache("list_loop3"));
2092 SetMask(0x1); // 2-entry table.
2093 SetMaxSize(0x3000); // 12 kB.
2094 DisableFirstCleanup();
2095 InitCache();
2097 // There is a wide loop of 5 entries.
2099 disk_cache::Entry* entry;
2100 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
2103 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2104 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2105 DisableFirstCleanup();
2106 SetNewEviction();
2107 InitCache();
2109 // The second entry is dirty, but removing it should not corrupt the list.
2110 disk_cache::Entry* entry;
2111 ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2112 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2114 // This should not delete the cache.
2115 entry->Doom();
2116 FlushQueueForTest();
2117 entry->Close();
2119 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2120 entry->Close();
2123 // Tests handling of corrupt entries by keeping the rankings node around, with
2124 // a fatal failure.
2125 void DiskCacheBackendTest::BackendInvalidEntry7() {
2126 const int kSize = 0x3000; // 12 kB.
2127 SetMaxSize(kSize * 10);
2128 InitCache();
2130 std::string first("some key");
2131 std::string second("something else");
2132 disk_cache::Entry* entry;
2133 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2134 entry->Close();
2135 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2137 // Corrupt this entry.
2138 disk_cache::EntryImpl* entry_impl =
2139 static_cast<disk_cache::EntryImpl*>(entry);
2141 entry_impl->rankings()->Data()->next = 0;
2142 entry_impl->rankings()->Store();
2143 entry->Close();
2144 FlushQueueForTest();
2145 EXPECT_EQ(2, cache_->GetEntryCount());
2147 // This should detect the bad entry.
2148 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2149 EXPECT_EQ(1, cache_->GetEntryCount());
2151 // We should delete the cache. The list still has a corrupt node.
2152 void* iter = NULL;
2153 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2154 FlushQueueForTest();
2155 EXPECT_EQ(0, cache_->GetEntryCount());
2158 TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2159 BackendInvalidEntry7();
2162 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2163 SetNewEviction();
2164 BackendInvalidEntry7();
2167 // Tests handling of corrupt entries by keeping the rankings node around, with
2168 // a non-fatal failure.
2169 void DiskCacheBackendTest::BackendInvalidEntry8() {
2170 const int kSize = 0x3000; // 12 kB
2171 SetMaxSize(kSize * 10);
2172 InitCache();
2174 std::string first("some key");
2175 std::string second("something else");
2176 disk_cache::Entry* entry;
2177 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2178 entry->Close();
2179 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2181 // Corrupt this entry.
2182 disk_cache::EntryImpl* entry_impl =
2183 static_cast<disk_cache::EntryImpl*>(entry);
2185 entry_impl->rankings()->Data()->contents = 0;
2186 entry_impl->rankings()->Store();
2187 entry->Close();
2188 FlushQueueForTest();
2189 EXPECT_EQ(2, cache_->GetEntryCount());
2191 // This should detect the bad entry.
2192 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2193 EXPECT_EQ(1, cache_->GetEntryCount());
2195 // We should not delete the cache.
2196 void* iter = NULL;
2197 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2198 entry->Close();
2199 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2200 EXPECT_EQ(1, cache_->GetEntryCount());
2203 TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2204 BackendInvalidEntry8();
2207 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2208 SetNewEviction();
2209 BackendInvalidEntry8();
2212 // Tests handling of corrupt entries detected by enumerations. Note that these
2213 // tests (xx9 to xx11) are basically just going through slightly different
2214 // codepaths so they are tightly coupled with the code, but that is better than
2215 // not testing error handling code.
2216 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2217 const int kSize = 0x3000; // 12 kB.
2218 SetMaxSize(kSize * 10);
2219 InitCache();
2221 std::string first("some key");
2222 std::string second("something else");
2223 disk_cache::Entry* entry;
2224 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2225 entry->Close();
2226 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2228 // Corrupt this entry.
2229 disk_cache::EntryImpl* entry_impl =
2230 static_cast<disk_cache::EntryImpl*>(entry);
2232 entry_impl->entry()->Data()->state = 0xbad;
2233 entry_impl->entry()->Store();
2234 entry->Close();
2235 FlushQueueForTest();
2236 EXPECT_EQ(2, cache_->GetEntryCount());
2238 if (eviction) {
2239 TrimForTest(false);
2240 EXPECT_EQ(1, cache_->GetEntryCount());
2241 TrimForTest(false);
2242 EXPECT_EQ(1, cache_->GetEntryCount());
2243 } else {
2244 // We should detect the problem through the list, but we should not delete
2245 // the entry, just fail the iteration.
2246 void* iter = NULL;
2247 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2249 // Now a full iteration will work, and return one entry.
2250 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2251 entry->Close();
2252 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2254 // This should detect what's left of the bad entry.
2255 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2256 EXPECT_EQ(2, cache_->GetEntryCount());
2258 DisableIntegrityCheck();
2261 TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2262 BackendInvalidEntry9(false);
2265 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2266 SetNewEviction();
2267 BackendInvalidEntry9(false);
2270 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2271 BackendInvalidEntry9(true);
2274 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2275 SetNewEviction();
2276 BackendInvalidEntry9(true);
2279 // Tests handling of corrupt entries detected by enumerations.
2280 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2281 const int kSize = 0x3000; // 12 kB.
2282 SetMaxSize(kSize * 10);
2283 SetNewEviction();
2284 InitCache();
2286 std::string first("some key");
2287 std::string second("something else");
2288 disk_cache::Entry* entry;
2289 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2290 entry->Close();
2291 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2292 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2293 entry->Close();
2294 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2296 // Corrupt this entry.
2297 disk_cache::EntryImpl* entry_impl =
2298 static_cast<disk_cache::EntryImpl*>(entry);
2300 entry_impl->entry()->Data()->state = 0xbad;
2301 entry_impl->entry()->Store();
2302 entry->Close();
2303 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2304 entry->Close();
2305 EXPECT_EQ(3, cache_->GetEntryCount());
2307 // We have:
2308 // List 0: third -> second (bad).
2309 // List 1: first.
2311 if (eviction) {
2312 // Detection order: second -> first -> third.
2313 TrimForTest(false);
2314 EXPECT_EQ(3, cache_->GetEntryCount());
2315 TrimForTest(false);
2316 EXPECT_EQ(2, cache_->GetEntryCount());
2317 TrimForTest(false);
2318 EXPECT_EQ(1, cache_->GetEntryCount());
2319 } else {
2320 // Detection order: third -> second -> first.
2321 // We should detect the problem through the list, but we should not delete
2322 // the entry.
2323 void* iter = NULL;
2324 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2325 entry->Close();
2326 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2327 EXPECT_EQ(first, entry->GetKey());
2328 entry->Close();
2329 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2331 DisableIntegrityCheck();
2334 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2335 BackendInvalidEntry10(false);
2338 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2339 BackendInvalidEntry10(true);
2342 // Tests handling of corrupt entries detected by enumerations.
2343 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2344 const int kSize = 0x3000; // 12 kB.
2345 SetMaxSize(kSize * 10);
2346 SetNewEviction();
2347 InitCache();
2349 std::string first("some key");
2350 std::string second("something else");
2351 disk_cache::Entry* entry;
2352 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2353 entry->Close();
2354 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2355 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2356 entry->Close();
2357 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2358 entry->Close();
2359 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2360 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2362 // Corrupt this entry.
2363 disk_cache::EntryImpl* entry_impl =
2364 static_cast<disk_cache::EntryImpl*>(entry);
2366 entry_impl->entry()->Data()->state = 0xbad;
2367 entry_impl->entry()->Store();
2368 entry->Close();
2369 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2370 entry->Close();
2371 FlushQueueForTest();
2372 EXPECT_EQ(3, cache_->GetEntryCount());
2374 // We have:
2375 // List 0: third.
2376 // List 1: second (bad) -> first.
2378 if (eviction) {
2379 // Detection order: third -> first -> second.
2380 TrimForTest(false);
2381 EXPECT_EQ(2, cache_->GetEntryCount());
2382 TrimForTest(false);
2383 EXPECT_EQ(1, cache_->GetEntryCount());
2384 TrimForTest(false);
2385 EXPECT_EQ(1, cache_->GetEntryCount());
2386 } else {
2387 // Detection order: third -> second.
2388 // We should detect the problem through the list, but we should not delete
2389 // the entry, just fail the iteration.
2390 void* iter = NULL;
2391 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2392 entry->Close();
2393 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2395 // Now a full iteration will work, and return two entries.
2396 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2397 entry->Close();
2398 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2399 entry->Close();
2400 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2402 DisableIntegrityCheck();
2405 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2406 BackendInvalidEntry11(false);
2409 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2410 BackendInvalidEntry11(true);
2413 // Tests handling of corrupt entries in the middle of a long eviction run.
2414 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2415 const int kSize = 0x3000; // 12 kB
2416 SetMaxSize(kSize * 10);
2417 InitCache();
2419 std::string first("some key");
2420 std::string second("something else");
2421 disk_cache::Entry* entry;
2422 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2423 entry->Close();
2424 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2426 // Corrupt this entry.
2427 disk_cache::EntryImpl* entry_impl =
2428 static_cast<disk_cache::EntryImpl*>(entry);
2430 entry_impl->entry()->Data()->state = 0xbad;
2431 entry_impl->entry()->Store();
2432 entry->Close();
2433 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2434 entry->Close();
2435 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2436 TrimForTest(true);
2437 EXPECT_EQ(1, cache_->GetEntryCount());
2438 entry->Close();
2439 DisableIntegrityCheck();
2442 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2443 BackendTrimInvalidEntry12();
2446 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2447 SetNewEviction();
2448 BackendTrimInvalidEntry12();
2451 // We want to be able to deal with messed up entries on disk.
2452 void DiskCacheBackendTest::BackendInvalidRankings2() {
2453 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2454 DisableFirstCleanup();
2455 InitCache();
2457 disk_cache::Entry *entry1, *entry2;
2458 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2459 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2460 entry2->Close();
2462 // CheckCacheIntegrity will fail at this point.
2463 DisableIntegrityCheck();
2466 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2467 BackendInvalidRankings2();
2470 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2471 SetNewEviction();
2472 BackendInvalidRankings2();
2475 // If the LRU is corrupt, we delete the cache.
2476 void DiskCacheBackendTest::BackendInvalidRankings() {
2477 disk_cache::Entry* entry;
2478 void* iter = NULL;
2479 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2480 entry->Close();
2481 EXPECT_EQ(2, cache_->GetEntryCount());
2483 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2484 FlushQueueForTest(); // Allow the restart to finish.
2485 EXPECT_EQ(0, cache_->GetEntryCount());
2488 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2489 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2490 DisableFirstCleanup();
2491 InitCache();
2492 BackendInvalidRankings();
2495 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2496 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2497 DisableFirstCleanup();
2498 SetNewEviction();
2499 InitCache();
2500 BackendInvalidRankings();
2503 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2504 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2505 DisableFirstCleanup();
2506 InitCache();
2507 SetTestMode(); // Fail cache reinitialization.
2508 BackendInvalidRankings();
2511 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2512 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2513 DisableFirstCleanup();
2514 SetNewEviction();
2515 InitCache();
2516 SetTestMode(); // Fail cache reinitialization.
2517 BackendInvalidRankings();
2520 // If the LRU is corrupt and we have open entries, we disable the cache.
2521 void DiskCacheBackendTest::BackendDisable() {
2522 disk_cache::Entry *entry1, *entry2;
2523 void* iter = NULL;
2524 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2526 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2527 EXPECT_EQ(0, cache_->GetEntryCount());
2528 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2530 entry1->Close();
2531 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2532 FlushQueueForTest(); // This one actually allows that task to complete.
2534 EXPECT_EQ(0, cache_->GetEntryCount());
2537 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2538 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2539 DisableFirstCleanup();
2540 InitCache();
2541 BackendDisable();
2544 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2545 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2546 DisableFirstCleanup();
2547 SetNewEviction();
2548 InitCache();
2549 BackendDisable();
2552 TEST_F(DiskCacheBackendTest, DisableFailure) {
2553 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2554 DisableFirstCleanup();
2555 InitCache();
2556 SetTestMode(); // Fail cache reinitialization.
2557 BackendDisable();
2560 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2561 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2562 DisableFirstCleanup();
2563 SetNewEviction();
2564 InitCache();
2565 SetTestMode(); // Fail cache reinitialization.
2566 BackendDisable();
2569 // This is another type of corruption on the LRU; disable the cache.
2570 void DiskCacheBackendTest::BackendDisable2() {
2571 EXPECT_EQ(8, cache_->GetEntryCount());
2573 disk_cache::Entry* entry;
2574 void* iter = NULL;
2575 int count = 0;
2576 while (OpenNextEntry(&iter, &entry) == net::OK) {
2577 ASSERT_TRUE(NULL != entry);
2578 entry->Close();
2579 count++;
2580 ASSERT_LT(count, 9);
2583 FlushQueueForTest();
2584 EXPECT_EQ(0, cache_->GetEntryCount());
2587 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2588 ASSERT_TRUE(CopyTestCache("list_loop"));
2589 DisableFirstCleanup();
2590 InitCache();
2591 BackendDisable2();
2594 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2595 ASSERT_TRUE(CopyTestCache("list_loop"));
2596 DisableFirstCleanup();
2597 SetNewEviction();
2598 InitCache();
2599 BackendDisable2();
2602 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2603 ASSERT_TRUE(CopyTestCache("list_loop"));
2604 DisableFirstCleanup();
2605 InitCache();
2606 SetTestMode(); // Fail cache reinitialization.
2607 BackendDisable2();
2610 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2611 ASSERT_TRUE(CopyTestCache("list_loop"));
2612 DisableFirstCleanup();
2613 SetNewEviction();
2614 InitCache();
2615 SetTestMode(); // Fail cache reinitialization.
2616 BackendDisable2();
2619 // If the index size changes when we disable the cache, we should not crash.
2620 void DiskCacheBackendTest::BackendDisable3() {
2621 disk_cache::Entry *entry1, *entry2;
2622 void* iter = NULL;
2623 EXPECT_EQ(2, cache_->GetEntryCount());
2624 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2625 entry1->Close();
2627 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2628 FlushQueueForTest();
2630 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2631 entry2->Close();
2633 EXPECT_EQ(1, cache_->GetEntryCount());
2636 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2637 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2638 DisableFirstCleanup();
2639 SetMaxSize(20 * 1024 * 1024);
2640 InitCache();
2641 BackendDisable3();
2644 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2645 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2646 DisableFirstCleanup();
2647 SetMaxSize(20 * 1024 * 1024);
2648 SetNewEviction();
2649 InitCache();
2650 BackendDisable3();
2653 // If we disable the cache, already open entries should work as far as possible.
2654 void DiskCacheBackendTest::BackendDisable4() {
2655 disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2656 void* iter = NULL;
2657 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2659 char key2[2000];
2660 char key3[20000];
2661 CacheTestFillBuffer(key2, sizeof(key2), true);
2662 CacheTestFillBuffer(key3, sizeof(key3), true);
2663 key2[sizeof(key2) - 1] = '\0';
2664 key3[sizeof(key3) - 1] = '\0';
2665 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2666 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2668 const int kBufSize = 20000;
2669 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2670 memset(buf->data(), 0, kBufSize);
2671 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2672 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2674 // This line should disable the cache but not delete it.
2675 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
2676 EXPECT_EQ(0, cache_->GetEntryCount());
2678 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
2680 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2681 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2682 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2684 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2685 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2686 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2688 std::string key = entry2->GetKey();
2689 EXPECT_EQ(sizeof(key2) - 1, key.size());
2690 key = entry3->GetKey();
2691 EXPECT_EQ(sizeof(key3) - 1, key.size());
2693 entry1->Close();
2694 entry2->Close();
2695 entry3->Close();
2696 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2697 FlushQueueForTest(); // This one actually allows that task to complete.
2699 EXPECT_EQ(0, cache_->GetEntryCount());
2702 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2703 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2704 DisableFirstCleanup();
2705 InitCache();
2706 BackendDisable4();
2709 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2710 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2711 DisableFirstCleanup();
2712 SetNewEviction();
2713 InitCache();
2714 BackendDisable4();
2717 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2718 MessageLoopHelper helper;
2720 ASSERT_TRUE(CleanupCacheDir());
2721 scoped_ptr<disk_cache::BackendImpl> cache;
2722 cache.reset(new disk_cache::BackendImpl(
2723 cache_path_, base::MessageLoopProxy::current().get(), NULL));
2724 ASSERT_TRUE(NULL != cache.get());
2725 cache->SetUnitTestMode();
2726 ASSERT_EQ(net::OK, cache->SyncInit());
2728 // Wait for a callback that never comes... about 2 secs :). The message loop
2729 // has to run to allow invocation of the usage timer.
2730 helper.WaitUntilCacheIoFinished(1);
2733 TEST_F(DiskCacheBackendTest, TimerNotCreated) {
2734 ASSERT_TRUE(CopyTestCache("wrong_version"));
2736 scoped_ptr<disk_cache::BackendImpl> cache;
2737 cache.reset(new disk_cache::BackendImpl(
2738 cache_path_, base::MessageLoopProxy::current().get(), NULL));
2739 ASSERT_TRUE(NULL != cache.get());
2740 cache->SetUnitTestMode();
2741 ASSERT_NE(net::OK, cache->SyncInit());
2743 ASSERT_TRUE(NULL == cache->GetTimerForTest());
2745 DisableIntegrityCheck();
2748 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2749 InitCache();
2750 disk_cache::Entry* entry;
2751 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2752 entry->Close();
2753 FlushQueueForTest();
2755 disk_cache::StatsItems stats;
2756 cache_->GetStats(&stats);
2757 EXPECT_FALSE(stats.empty());
2759 disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2760 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2762 cache_.reset();
2764 // Now open the cache and verify that the stats are still there.
2765 DisableFirstCleanup();
2766 InitCache();
2767 EXPECT_EQ(1, cache_->GetEntryCount());
2769 stats.clear();
2770 cache_->GetStats(&stats);
2771 EXPECT_FALSE(stats.empty());
2773 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2776 void DiskCacheBackendTest::BackendDoomAll() {
2777 InitCache();
2779 disk_cache::Entry *entry1, *entry2;
2780 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2781 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2782 entry1->Close();
2783 entry2->Close();
2785 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2786 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2788 ASSERT_EQ(4, cache_->GetEntryCount());
2789 EXPECT_EQ(net::OK, DoomAllEntries());
2790 ASSERT_EQ(0, cache_->GetEntryCount());
2792 // We should stop posting tasks at some point (if we post any).
2793 base::MessageLoop::current()->RunUntilIdle();
2795 disk_cache::Entry *entry3, *entry4;
2796 EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2797 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2798 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2800 EXPECT_EQ(net::OK, DoomAllEntries());
2801 ASSERT_EQ(0, cache_->GetEntryCount());
2803 entry1->Close();
2804 entry2->Close();
2805 entry3->Doom(); // The entry should be already doomed, but this must work.
2806 entry3->Close();
2807 entry4->Close();
2809 // Now try with all references released.
2810 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2811 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2812 entry1->Close();
2813 entry2->Close();
2815 ASSERT_EQ(2, cache_->GetEntryCount());
2816 EXPECT_EQ(net::OK, DoomAllEntries());
2817 ASSERT_EQ(0, cache_->GetEntryCount());
2819 EXPECT_EQ(net::OK, DoomAllEntries());
2822 TEST_F(DiskCacheBackendTest, DoomAll) {
2823 BackendDoomAll();
2826 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2827 SetNewEviction();
2828 BackendDoomAll();
2831 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2832 SetMemoryOnlyMode();
2833 BackendDoomAll();
2836 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2837 SetCacheType(net::APP_CACHE);
2838 BackendDoomAll();
2841 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2842 SetCacheType(net::SHADER_CACHE);
2843 BackendDoomAll();
2846 // If the index size changes when we doom the cache, we should not crash.
2847 void DiskCacheBackendTest::BackendDoomAll2() {
2848 EXPECT_EQ(2, cache_->GetEntryCount());
2849 EXPECT_EQ(net::OK, DoomAllEntries());
2851 disk_cache::Entry* entry;
2852 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2853 entry->Close();
2855 EXPECT_EQ(1, cache_->GetEntryCount());
2858 TEST_F(DiskCacheBackendTest, DoomAll2) {
2859 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2860 DisableFirstCleanup();
2861 SetMaxSize(20 * 1024 * 1024);
2862 InitCache();
2863 BackendDoomAll2();
2866 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2867 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2868 DisableFirstCleanup();
2869 SetMaxSize(20 * 1024 * 1024);
2870 SetNewEviction();
2871 InitCache();
2872 BackendDoomAll2();
2875 // We should be able to create the same entry on multiple simultaneous instances
2876 // of the cache.
2877 TEST_F(DiskCacheTest, MultipleInstances) {
2878 base::ScopedTempDir store1, store2;
2879 ASSERT_TRUE(store1.CreateUniqueTempDir());
2880 ASSERT_TRUE(store2.CreateUniqueTempDir());
2882 base::Thread cache_thread("CacheThread");
2883 ASSERT_TRUE(cache_thread.StartWithOptions(
2884 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2885 net::TestCompletionCallback cb;
2887 const int kNumberOfCaches = 2;
2888 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2890 int rv =
2891 disk_cache::CreateCacheBackend(net::DISK_CACHE,
2892 net::CACHE_BACKEND_DEFAULT,
2893 store1.path(),
2895 false,
2896 cache_thread.message_loop_proxy().get(),
2897 NULL,
2898 &cache[0],
2899 cb.callback());
2900 ASSERT_EQ(net::OK, cb.GetResult(rv));
2901 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2902 net::CACHE_BACKEND_DEFAULT,
2903 store2.path(),
2905 false,
2906 cache_thread.message_loop_proxy().get(),
2907 NULL,
2908 &cache[1],
2909 cb.callback());
2910 ASSERT_EQ(net::OK, cb.GetResult(rv));
2912 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2914 std::string key("the first key");
2915 disk_cache::Entry* entry;
2916 for (int i = 0; i < kNumberOfCaches; i++) {
2917 rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2918 ASSERT_EQ(net::OK, cb.GetResult(rv));
2919 entry->Close();
2923 // Test the six regions of the curve that determines the max cache size.
2924 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2925 using disk_cache::kDefaultCacheSize;
2926 int64 large_size = kDefaultCacheSize;
2928 // Region 1: expected = available * 0.8
2929 EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
2930 disk_cache::PreferredCacheSize(large_size - 1));
2931 EXPECT_EQ(kDefaultCacheSize * 8 / 10,
2932 disk_cache::PreferredCacheSize(large_size));
2933 EXPECT_EQ(kDefaultCacheSize - 1,
2934 disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
2936 // Region 2: expected = default_size
2937 EXPECT_EQ(kDefaultCacheSize,
2938 disk_cache::PreferredCacheSize(large_size * 10 / 8));
2939 EXPECT_EQ(kDefaultCacheSize,
2940 disk_cache::PreferredCacheSize(large_size * 10 - 1));
2942 // Region 3: expected = available * 0.1
2943 EXPECT_EQ(kDefaultCacheSize,
2944 disk_cache::PreferredCacheSize(large_size * 10));
2945 EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
2946 disk_cache::PreferredCacheSize(large_size * 25 - 1));
2948 // Region 4: expected = default_size * 2.5
2949 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2950 disk_cache::PreferredCacheSize(large_size * 25));
2951 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2952 disk_cache::PreferredCacheSize(large_size * 100 - 1));
2953 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2954 disk_cache::PreferredCacheSize(large_size * 100));
2955 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2956 disk_cache::PreferredCacheSize(large_size * 250 - 1));
2958 // Region 5: expected = available * 0.01
2959 int64 largest_size = kDefaultCacheSize * 4;
2960 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2961 disk_cache::PreferredCacheSize(large_size * 250));
2962 EXPECT_EQ(largest_size - 1,
2963 disk_cache::PreferredCacheSize(largest_size * 100 - 1));
2965 // Region 6: expected = largest possible size
2966 EXPECT_EQ(largest_size,
2967 disk_cache::PreferredCacheSize(largest_size * 100));
2968 EXPECT_EQ(largest_size,
2969 disk_cache::PreferredCacheSize(largest_size * 10000));
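// The expectations above trace this piecewise curve (default stands for
// kDefaultCacheSize; the exact constants live in disk_cache::PreferredCacheSize):
//   available < 1.25 * default  -> 0.8  * available
//   available < 10   * default  -> default
//   available < 25   * default  -> 0.1  * available
//   available < 250  * default  -> 2.5  * default
//   otherwise                   -> 0.01 * available, capped at 4 * default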
2972 // Tests that we can "migrate" a running instance from one experiment group to
2973 // another.
2974 TEST_F(DiskCacheBackendTest, Histograms) {
2975 InitCache();
2976 disk_cache::BackendImpl* backend_ = cache_impl_; // Needed by the macro.
2978 for (int i = 1; i < 3; i++) {
2979 CACHE_UMA(HOURS, "FillupTime", i, 28);
2983 // Make sure that we keep the total memory used by the internal buffers under
2984 // control.
2985 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
2986 InitCache();
2987 std::string key("the first key");
2988 disk_cache::Entry* entry;
2989 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
2991 const int kSize = 200;
2992 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
2993 CacheTestFillBuffer(buffer->data(), kSize, true);
2995 for (int i = 0; i < 10; i++) {
2996 SCOPED_TRACE(i);
2997 // Allocate 2MB for this entry.
2998 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
2999 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
3000 EXPECT_EQ(kSize,
3001 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
3002 EXPECT_EQ(kSize,
3003 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
3005 // Delete one of the buffers and truncate the other.
3006 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
3007 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
3009 // Delete the second buffer, writing 10 bytes to disk.
3010 entry->Close();
3011 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3014 entry->Close();
3015 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
3018 // This test assumes at least 150MB of system memory.
3019 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
3020 InitCache();
3022 const int kOneMB = 1024 * 1024;
3023 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3024 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3026 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3027 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3029 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3030 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
3032 cache_impl_->BufferDeleted(kOneMB);
3033 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3035 // Check the upper limit.
3036 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3038 for (int i = 0; i < 30; i++)
3039 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
3041 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
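// Taken together, the checks above bound the global buffer budget: with 2 MB
// already accounted for a 30 MB request is refused, and at most 30 further
// 1 MB requests exhaust it, so the limit sits somewhere between 3 MB and 32 MB
// on such a machine (the exact value is an implementation detail of
// BackendImpl).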
3044 // Tests that sharing of external files works and we are able to delete the
3045 // files when we need to.
3046 TEST_F(DiskCacheBackendTest, FileSharing) {
3047 InitCache();
3049 disk_cache::Addr address(0x80000001);
3050 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3051 base::FilePath name = cache_impl_->GetFileName(address);
3053 scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
3054 file->Init(name);
3056 #if defined(OS_WIN)
3057 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3058 DWORD access = GENERIC_READ | GENERIC_WRITE;
3059 base::win::ScopedHandle file2(CreateFile(
3060 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3061 EXPECT_FALSE(file2.IsValid());
3063 sharing |= FILE_SHARE_DELETE;
3064 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3065 OPEN_EXISTING, 0, NULL));
3066 EXPECT_TRUE(file2.IsValid());
3067 #endif
3069 EXPECT_TRUE(base::DeleteFile(name, false));
3071 // We should be able to use the file.
3072 const int kSize = 200;
3073 char buffer1[kSize];
3074 char buffer2[kSize];
3075 memset(buffer1, 't', kSize);
3076 memset(buffer2, 0, kSize);
3077 EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3078 EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3079 EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
3081 EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
3084 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3085 InitCache();
3087 disk_cache::Entry* entry;
3089 for (int i = 0; i < 2; ++i) {
3090 std::string key = base::StringPrintf("key%d", i);
3091 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3092 entry->Close();
3095 // Ping the oldest entry.
3096 cache_->OnExternalCacheHit("key0");
3098 TrimForTest(false);
3100 // Make sure the older key remains.
3101 EXPECT_EQ(1, cache_->GetEntryCount());
3102 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3103 entry->Close();
3106 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3107 SetCacheType(net::SHADER_CACHE);
3108 InitCache();
3110 disk_cache::Entry* entry;
3112 for (int i = 0; i < 2; ++i) {
3113 std::string key = base::StringPrintf("key%d", i);
3114 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3115 entry->Close();
3118 // Ping the oldest entry.
3119 cache_->OnExternalCacheHit("key0");
3121 TrimForTest(false);
3123 // Make sure the older key remains.
3124 EXPECT_EQ(1, cache_->GetEntryCount());
3125 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3126 entry->Close();
3129 void DiskCacheBackendTest::TracingBackendBasics() {
3130 InitCache();
3131 cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass()));
3132 cache_impl_ = NULL;
3133 EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
3134 if (!simple_cache_mode_) {
3135 EXPECT_EQ(0, cache_->GetEntryCount());
3138 net::TestCompletionCallback cb;
3139 disk_cache::Entry* entry = NULL;
3140 EXPECT_NE(net::OK, OpenEntry("key", &entry));
3141 EXPECT_TRUE(NULL == entry);
3143 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3144 EXPECT_TRUE(NULL != entry);
3146 disk_cache::Entry* same_entry = NULL;
3147 ASSERT_EQ(net::OK, OpenEntry("key", &same_entry));
3148 EXPECT_TRUE(NULL != same_entry);
3150 if (!simple_cache_mode_) {
3151 EXPECT_EQ(1, cache_->GetEntryCount());
3153 entry->Close();
3154 entry = NULL;
3155 same_entry->Close();
3156 same_entry = NULL;
3159 TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
3160 TracingBackendBasics();
3163 // The Simple Cache backend requires a few guarantees from the filesystem like
3164 // atomic renaming of recently opened files. Those guarantees are not provided in
3165 // general on Windows.
3166 #if defined(OS_POSIX)
3168 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3169 SetCacheType(net::APP_CACHE);
3170 SetSimpleCacheMode();
3171 BackendShutdownWithPendingCreate(false);
3174 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3175 SetCacheType(net::APP_CACHE);
3176 SetSimpleCacheMode();
3177 BackendShutdownWithPendingFileIO(false);
3180 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3181 SetSimpleCacheMode();
3182 BackendBasics();
3185 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3186 SetCacheType(net::APP_CACHE);
3187 SetSimpleCacheMode();
3188 BackendBasics();
3191 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3192 SetSimpleCacheMode();
3193 BackendKeying();
3196 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3197 SetSimpleCacheMode();
3198 SetCacheType(net::APP_CACHE);
3199 BackendKeying();
3202 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3203 SetSimpleCacheMode();
3204 BackendSetSize();
3207 // MacOS has a default open file limit of 256 files, which is incompatible with
3208 // this simple cache test.
3209 #if defined(OS_MACOSX)
3210 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3211 #else
3212 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3213 #endif
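// For example, SIMPLE_MAYBE_MACOS(SimpleCacheLoad) expands to
// DISABLED_SimpleCacheLoad on MacOS (gtest registers but does not run
// DISABLED_ tests) and to plain SimpleCacheLoad elsewhere.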
3215 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3216 SetMaxSize(0x100000);
3217 SetSimpleCacheMode();
3218 BackendLoad();
3221 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3222 SetCacheType(net::APP_CACHE);
3223 SetSimpleCacheMode();
3224 SetMaxSize(0x100000);
3225 BackendLoad();
3228 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3229 SetSimpleCacheMode();
3230 BackendDoomRecent();
3233 TEST_F(DiskCacheBackendTest, SimpleDoomBetween) {
3234 SetSimpleCacheMode();
3235 BackendDoomBetween();
3238 TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3239 SetSimpleCacheMode();
3240 BackendDoomAll();
3243 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3244 SetCacheType(net::APP_CACHE);
3245 SetSimpleCacheMode();
3246 BackendDoomAll();
3249 TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
3250 SetSimpleCacheMode();
3251 TracingBackendBasics();
3252 // TODO(pasko): implement integrity checking on the Simple Backend.
3253 DisableIntegrityCheck();
3256 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3257 SetSimpleCacheMode();
3258 InitCache();
3260 const char* key = "the first key";
3261 disk_cache::Entry* entry = NULL;
3263 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3264 ASSERT_TRUE(entry != NULL);
3265 entry->Close();
3266 entry = NULL;
3268 // To make sure the file creation completed we need to call open again so that
3269 // we block until it actually created the files.
3270 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3271 ASSERT_TRUE(entry != NULL);
3272 entry->Close();
3273 entry = NULL;
3275 // Delete one of the files in the entry.
3276 base::FilePath to_delete_file = cache_path_.AppendASCII(
3277 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3278 EXPECT_TRUE(base::PathExists(to_delete_file));
3279 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3281 // Failing to open the entry should delete the rest of these files.
3282 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3284 // Confirm the rest of the files are gone.
3285 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3286 base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3287 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3288 EXPECT_FALSE(base::PathExists(should_be_gone_file));
3292 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3293 SetSimpleCacheMode();
3294 InitCache();
3296 const char* key = "the first key";
3297 disk_cache::Entry* entry = NULL;
3299 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3300 disk_cache::Entry* null = NULL;
3301 ASSERT_NE(null, entry);
3302 entry->Close();
3303 entry = NULL;
3305 // To make sure the file creation completed we need to call open again so that
3306 // we block until it actually created the files.
3307 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3308 ASSERT_NE(null, entry);
3309 entry->Close();
3310 entry = NULL;
3312 // Write an invalid header for stream 0 and stream 1.
3313 base::FilePath entry_file1_path = cache_path_.AppendASCII(
3314 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3316 disk_cache::SimpleFileHeader header;
3317 header.initial_magic_number = GG_UINT64_C(0xbadf00d);
3318 EXPECT_EQ(
3319 implicit_cast<int>(sizeof(header)),
3320 base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3321 sizeof(header)));
3322 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3325 // Tests that the Simple Cache Backend fails to initialize with non-matching
3326 // file structure on disk.
3327 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3328 // Create a cache structure with the |BackendImpl|.
3329 InitCache();
3330 disk_cache::Entry* entry;
3331 const int kSize = 50;
3332 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3333 CacheTestFillBuffer(buffer->data(), kSize, false);
3334 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3335 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3336 entry->Close();
3337 cache_.reset();
3339 // Check that the |SimpleBackendImpl| does not favor this structure.
3340 base::Thread cache_thread("CacheThread");
3341 ASSERT_TRUE(cache_thread.StartWithOptions(
3342 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3343 disk_cache::SimpleBackendImpl* simple_cache =
3344 new disk_cache::SimpleBackendImpl(cache_path_,
3346 net::DISK_CACHE,
3347 cache_thread.message_loop_proxy().get(),
3348 NULL);
3349 net::TestCompletionCallback cb;
3350 int rv = simple_cache->Init(cb.callback());
3351 EXPECT_NE(net::OK, cb.GetResult(rv));
3352 delete simple_cache;
3353 DisableIntegrityCheck();
3356 // Tests that the |BackendImpl| refuses to initialize on top of the files
3357 // generated by the Simple Cache Backend.
3358 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3359 // Create a cache structure with the |SimpleBackendImpl|.
3360 SetSimpleCacheMode();
3361 InitCache();
3362 disk_cache::Entry* entry;
3363 const int kSize = 50;
3364 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3365 CacheTestFillBuffer(buffer->data(), kSize, false);
3366 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3367 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3368 entry->Close();
3369 cache_.reset();
3371 // Check that the |BackendImpl| does not favor this structure.
3372 base::Thread cache_thread("CacheThread");
3373 ASSERT_TRUE(cache_thread.StartWithOptions(
3374 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3375 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3376 cache_path_, base::MessageLoopProxy::current().get(), NULL);
3377 cache->SetUnitTestMode();
3378 net::TestCompletionCallback cb;
3379 int rv = cache->Init(cb.callback());
3380 EXPECT_NE(net::OK, cb.GetResult(rv));
3381 delete cache;
3382 DisableIntegrityCheck();
3385 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3386 SetSimpleCacheMode();
3387 BackendFixEnumerators();
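// For reference, the enumeration contract exercised below through the
// CreateSetOfRandomEntries() and EnumerateAndMatchKeys() helpers boils down
// to the following (mirroring the loops used earlier in this file):
//
//   void* iter = NULL;
//   disk_cache::Entry* entry = NULL;
//   while (OpenNextEntry(&iter, &entry) == net::OK)
//     entry->Close();
//   cache_->EndEnumeration(&iter);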
3390 // Tests basic functionality of the SimpleBackend implementation of the
3391 // enumeration API.
3392 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3393 SetSimpleCacheMode();
3394 InitCache();
3395 std::set<std::string> key_pool;
3396 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3398 // Check that enumeration returns all entries.
3399 std::set<std::string> keys_to_match(key_pool);
3400 void* iter = NULL;
3401 size_t count = 0;
3402 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3403 cache_->EndEnumeration(&iter);
3404 EXPECT_EQ(key_pool.size(), count);
3405 EXPECT_TRUE(keys_to_match.empty());
3407 // Check that opening entries does not affect enumeration.
3408 keys_to_match = key_pool;
3409 iter = NULL;
3410 count = 0;
3411 disk_cache::Entry* entry_opened_before;
3412 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3413 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3414 &iter,
3415 &keys_to_match,
3416 &count));
3418 disk_cache::Entry* entry_opened_middle;
3419 ASSERT_EQ(net::OK,
3420 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3421 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3422 cache_->EndEnumeration(&iter);
3423 entry_opened_before->Close();
3424 entry_opened_middle->Close();
3426 EXPECT_EQ(key_pool.size(), count);
3427 EXPECT_TRUE(keys_to_match.empty());
3430 // Tests that the enumerations are not affected by dooming an entry in the
3431 // middle.
3432 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3433 SetSimpleCacheMode();
3434 InitCache();
3435 std::set<std::string> key_pool;
3436 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3438 // Check that enumeration returns all entries but the doomed one.
3439 std::set<std::string> keys_to_match(key_pool);
3440 void* iter = NULL;
3441 size_t count = 0;
3442 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3443 &iter,
3444 &keys_to_match,
3445 &count));
3447 std::string key_to_delete = *(keys_to_match.begin());
3448 DoomEntry(key_to_delete);
3449 keys_to_match.erase(key_to_delete);
3450 key_pool.erase(key_to_delete);
3451 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3452 cache_->EndEnumeration(&iter);
3454 EXPECT_EQ(key_pool.size(), count);
3455 EXPECT_TRUE(keys_to_match.empty());
3458 // Tests that enumerations are not affected by corrupt files.
3459 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3460 SetSimpleCacheMode();
3461 InitCache();
3462 std::set<std::string> key_pool;
3463 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3465 // Create a corrupt entry. The write/read sequence ensures that the entry will
3466 // have been created before corrupting the platform files, in the case of
3467 // optimistic operations.
3468 const std::string key = "the key";
3469 disk_cache::Entry* corrupted_entry;
3471 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3472 ASSERT_TRUE(corrupted_entry);
3473 const int kSize = 50;
3474 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3475 CacheTestFillBuffer(buffer->data(), kSize, false);
3476 ASSERT_EQ(kSize,
3477 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3478 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3479 corrupted_entry->Close();
3481 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3482 key, cache_path_));
3483 EXPECT_EQ(key_pool.size() + 1,
3484 implicit_cast<size_t>(cache_->GetEntryCount()));
3486 // Check that enumeration returns all entries but the corrupt one.
3487 std::set<std::string> keys_to_match(key_pool);
3488 void* iter = NULL;
3489 size_t count = 0;
3490 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3491 cache_->EndEnumeration(&iter);
3493 EXPECT_EQ(key_pool.size(), count);
3494 EXPECT_TRUE(keys_to_match.empty());
3497 #endif // defined(OS_POSIX)