net/disk_cache/backend_unittest.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/file_util.h"
7 #include "base/metrics/field_trial.h"
8 #include "base/port.h"
9 #include "base/strings/string_util.h"
10 #include "base/strings/stringprintf.h"
11 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
12 #include "base/thread_task_runner_handle.h"
13 #include "base/threading/platform_thread.h"
14 #include "base/threading/thread_restrictions.h"
15 #include "net/base/cache_type.h"
16 #include "net/base/io_buffer.h"
17 #include "net/base/net_errors.h"
18 #include "net/base/test_completion_callback.h"
19 #include "net/disk_cache/blockfile/backend_impl.h"
20 #include "net/disk_cache/blockfile/entry_impl.h"
21 #include "net/disk_cache/blockfile/experiments.h"
22 #include "net/disk_cache/blockfile/histogram_macros.h"
23 #include "net/disk_cache/blockfile/mapped_file.h"
24 #include "net/disk_cache/cache_util.h"
25 #include "net/disk_cache/disk_cache_test_base.h"
26 #include "net/disk_cache/disk_cache_test_util.h"
27 #include "net/disk_cache/memory/mem_backend_impl.h"
28 #include "net/disk_cache/simple/simple_backend_impl.h"
29 #include "net/disk_cache/simple/simple_entry_format.h"
30 #include "net/disk_cache/simple/simple_test_util.h"
31 #include "net/disk_cache/simple/simple_util.h"
32 #include "net/disk_cache/tracing/tracing_cache_backend.h"
33 #include "testing/gtest/include/gtest/gtest.h"
35 #if defined(OS_WIN)
36 #include "base/win/scoped_handle.h"
37 #endif
39 // Provide a BackendImpl object to macros from histogram_macros.h.
40 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_
42 using base::Time;
44 namespace {
46 const char kExistingEntryKey[] = "existing entry key";
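// Used by CreateExistingEntryCache() below and re-opened by the
// SimpleCacheControl* tests near the end of this file.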
48 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
49 const base::Thread& cache_thread,
50 base::FilePath& cache_path) {
51 net::TestCompletionCallback cb;
53 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
54 cache_path, cache_thread.message_loop_proxy(), NULL));
55 int rv = cache->Init(cb.callback());
56 if (cb.GetResult(rv) != net::OK)
57 return scoped_ptr<disk_cache::BackendImpl>();
59 disk_cache::Entry* entry = NULL;
60 rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
61 if (cb.GetResult(rv) != net::OK)
62 return scoped_ptr<disk_cache::BackendImpl>();
63 entry->Close();
65 return cache.Pass();
66 }
68 } // namespace
70 // Tests that can run with different types of caches.
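// Each Backend*() helper below is the shared body of one such test; the
// TEST_F wrappers pick a cache flavor first and then run it, roughly:
//
//   TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
//     SetMemoryOnlyMode();  // choose the flavor (memory-only, new eviction, ...)
//     BackendBasics();      // shared test body
//   }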
71 class DiskCacheBackendTest : public DiskCacheTestWithCache {
72 protected:
73 // Some utility methods:
75 // Perform IO operations on the cache until there is pending IO.
76 int GeneratePendingIO(net::TestCompletionCallback* cb);
78 // Adds 5 sparse entries. |doomed_start| and |doomed_end|, if not NULL,
79 // will be filled with times used by DoomEntriesSince and DoomEntriesBetween.
80 // There are 4 entries after |doomed_start| and 2 after |doomed_end|.
81 void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
83 bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
84 bool EnumerateAndMatchKeys(int max_to_open,
85 void** iter,
86 std::set<std::string>* keys_to_match,
87 size_t* count);
89 // Actual tests:
90 void BackendBasics();
91 void BackendKeying();
92 void BackendShutdownWithPendingFileIO(bool fast);
93 void BackendShutdownWithPendingIO(bool fast);
94 void BackendShutdownWithPendingCreate(bool fast);
95 void BackendSetSize();
96 void BackendLoad();
97 void BackendChain();
98 void BackendValidEntry();
99 void BackendInvalidEntry();
100 void BackendInvalidEntryRead();
101 void BackendInvalidEntryWithLoad();
102 void BackendTrimInvalidEntry();
103 void BackendTrimInvalidEntry2();
104 void BackendEnumerations();
105 void BackendEnumerations2();
106 void BackendInvalidEntryEnumeration();
107 void BackendFixEnumerators();
108 void BackendDoomRecent();
109 void BackendDoomBetween();
110 void BackendTransaction(const std::string& name, int num_entries, bool load);
111 void BackendRecoverInsert();
112 void BackendRecoverRemove();
113 void BackendRecoverWithEviction();
114 void BackendInvalidEntry2();
115 void BackendInvalidEntry3();
116 void BackendInvalidEntry7();
117 void BackendInvalidEntry8();
118 void BackendInvalidEntry9(bool eviction);
119 void BackendInvalidEntry10(bool eviction);
120 void BackendInvalidEntry11(bool eviction);
121 void BackendTrimInvalidEntry12();
122 void BackendDoomAll();
123 void BackendDoomAll2();
124 void BackendInvalidRankings();
125 void BackendInvalidRankings2();
126 void BackendDisable();
127 void BackendDisable2();
128 void BackendDisable3();
129 void BackendDisable4();
130 void TracingBackendBasics();
131 };
133 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
134 if (!use_current_thread_) {
135 ADD_FAILURE();
136 return net::ERR_FAILED;
137 }
139 disk_cache::Entry* entry;
140 int rv = cache_->CreateEntry("some key", &entry, cb->callback());
141 if (cb->GetResult(rv) != net::OK)
142 return net::ERR_CACHE_CREATE_FAILURE;
144 const int kSize = 25000;
145 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
146 CacheTestFillBuffer(buffer->data(), kSize, false);
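// The loop below issues 25,000-byte writes at 64 KB strides across a 10 MB
// range; with buffering disabled, one of them should eventually return
// net::ERR_IO_PENDING, which is the state callers of GeneratePendingIO()
// want to reach.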
148 for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
149 // We are using the current thread as the cache thread because we want to
150 // be able to call this method directly and make sure that it is the OS
151 // (rather than a thread switch on our side) that reports the IO as pending.
152 if (!simple_cache_mode_) {
153 rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
154 0, i, buffer.get(), kSize, cb->callback(), false);
155 } else {
156 rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
157 }
159 if (rv == net::ERR_IO_PENDING)
160 break;
161 if (rv != kSize)
162 rv = net::ERR_FAILED;
163 }
165 // Don't call Close() to avoid going through the queue or we'll deadlock
166 // waiting for the operation to finish.
167 if (!simple_cache_mode_)
168 static_cast<disk_cache::EntryImpl*>(entry)->Release();
169 else
170 entry->Close();
172 return rv;
173 }
175 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
176 base::Time* doomed_end) {
177 InitCache();
179 const int kSize = 50;
180 // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
181 const int kOffset = 10 + 1024 * 1024;
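// Writing at offset 0 and again at |kOffset| + |kSize| keeps the two chunks
// in separate sparse child entries; the GetEntryCount() notes in the sparse
// Doom tests below rely on that layout.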
183 disk_cache::Entry* entry0 = NULL;
184 disk_cache::Entry* entry1 = NULL;
185 disk_cache::Entry* entry2 = NULL;
187 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
188 CacheTestFillBuffer(buffer->data(), kSize, false);
190 ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
191 ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
192 ASSERT_EQ(kSize,
193 WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
194 entry0->Close();
196 FlushQueueForTest();
197 AddDelay();
198 if (doomed_start)
199 *doomed_start = base::Time::Now();
201 // Order in rankings list:
202 // first_part1, first_part2, second_part1, second_part2
203 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
204 ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
205 ASSERT_EQ(kSize,
206 WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
207 entry1->Close();
209 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
210 ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
211 ASSERT_EQ(kSize,
212 WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
213 entry2->Close();
215 FlushQueueForTest();
216 AddDelay();
217 if (doomed_end)
218 *doomed_end = base::Time::Now();
220 // Order in rankings list:
221 // third_part1, fourth_part1, third_part2, fourth_part2
222 disk_cache::Entry* entry3 = NULL;
223 disk_cache::Entry* entry4 = NULL;
224 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
225 ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
226 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
227 ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
228 ASSERT_EQ(kSize,
229 WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
230 ASSERT_EQ(kSize,
231 WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
232 entry3->Close();
233 entry4->Close();
235 FlushQueueForTest();
236 AddDelay();
237 }
239 // Creates entries based on random keys. Stores these keys in |key_pool|.
240 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
241 std::set<std::string>* key_pool) {
242 const int kNumEntries = 10;
244 for (int i = 0; i < kNumEntries; ++i) {
245 std::string key = GenerateKey(true);
246 disk_cache::Entry* entry;
247 if (CreateEntry(key, &entry) != net::OK)
248 return false;
249 key_pool->insert(key);
250 entry->Close();
251 }
252 return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
253 }
255 // Performs iteration over the backend and checks that the keys of entries
256 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
257 // will be opened, if it is positive. Otherwise, iteration will continue until
258 // OpenNextEntry stops returning net::OK.
259 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
260 int max_to_open,
261 void** iter,
262 std::set<std::string>* keys_to_match,
263 size_t* count) {
264 disk_cache::Entry* entry;
266 while (OpenNextEntry(iter, &entry) == net::OK) {
267 if (!entry)
268 return false;
269 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
270 entry->Close();
271 ++(*count);
272 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
273 break;
274 }
276 return true;
277 }
279 void DiskCacheBackendTest::BackendBasics() {
280 InitCache();
281 disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
282 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
283 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
284 ASSERT_TRUE(NULL != entry1);
285 entry1->Close();
286 entry1 = NULL;
288 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
289 ASSERT_TRUE(NULL != entry1);
290 entry1->Close();
291 entry1 = NULL;
293 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
294 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
295 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
296 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
297 ASSERT_TRUE(NULL != entry1);
298 ASSERT_TRUE(NULL != entry2);
299 EXPECT_EQ(2, cache_->GetEntryCount());
301 disk_cache::Entry* entry3 = NULL;
302 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
303 ASSERT_TRUE(NULL != entry3);
304 EXPECT_TRUE(entry2 == entry3);
305 EXPECT_EQ(2, cache_->GetEntryCount());
307 EXPECT_EQ(net::OK, DoomEntry("some other key"));
308 EXPECT_EQ(1, cache_->GetEntryCount());
309 entry1->Close();
310 entry2->Close();
311 entry3->Close();
313 EXPECT_EQ(net::OK, DoomEntry("the first key"));
314 EXPECT_EQ(0, cache_->GetEntryCount());
316 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
317 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
318 entry1->Doom();
319 entry1->Close();
320 EXPECT_EQ(net::OK, DoomEntry("some other key"));
321 EXPECT_EQ(0, cache_->GetEntryCount());
322 entry2->Close();
325 TEST_F(DiskCacheBackendTest, Basics) {
326 BackendBasics();
329 TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
330 SetNewEviction();
331 BackendBasics();
334 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
335 SetMemoryOnlyMode();
336 BackendBasics();
339 TEST_F(DiskCacheBackendTest, AppCacheBasics) {
340 SetCacheType(net::APP_CACHE);
341 BackendBasics();
344 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
345 SetCacheType(net::SHADER_CACHE);
346 BackendBasics();
349 void DiskCacheBackendTest::BackendKeying() {
350 InitCache();
351 const char* kName1 = "the first key";
352 const char* kName2 = "the first Key";
353 disk_cache::Entry *entry1, *entry2;
354 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
356 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
357 EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
358 entry2->Close();
360 char buffer[30];
361 base::strlcpy(buffer, kName1, arraysize(buffer));
362 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
363 EXPECT_TRUE(entry1 == entry2);
364 entry2->Close();
366 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
367 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
368 EXPECT_TRUE(entry1 == entry2);
369 entry2->Close();
371 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
372 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
373 EXPECT_TRUE(entry1 == entry2);
374 entry2->Close();
376 // Now verify long keys.
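// The ~1 kB key below still fits in the block files, while the ~20 kB key has
// to be stored in a separate external file (see the assertion messages).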
377 char buffer2[20000];
378 memset(buffer2, 's', sizeof(buffer2));
379 buffer2[1023] = '\0';
380 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
381 entry2->Close();
383 buffer2[1023] = 'g';
384 buffer2[19999] = '\0';
385 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
386 entry2->Close();
387 entry1->Close();
390 TEST_F(DiskCacheBackendTest, Keying) {
391 BackendKeying();
394 TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
395 SetNewEviction();
396 BackendKeying();
399 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
400 SetMemoryOnlyMode();
401 BackendKeying();
404 TEST_F(DiskCacheBackendTest, AppCacheKeying) {
405 SetCacheType(net::APP_CACHE);
406 BackendKeying();
409 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
410 SetCacheType(net::SHADER_CACHE);
411 BackendKeying();
414 TEST_F(DiskCacheTest, CreateBackend) {
415 net::TestCompletionCallback cb;
417 {
418 ASSERT_TRUE(CleanupCacheDir());
419 base::Thread cache_thread("CacheThread");
420 ASSERT_TRUE(cache_thread.StartWithOptions(
421 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
423 // Test the private factory method(s).
424 scoped_ptr<disk_cache::Backend> cache;
425 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
426 ASSERT_TRUE(cache.get());
427 cache.reset();
429 // Now test the public API.
430 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
431 net::CACHE_BACKEND_DEFAULT,
432 cache_path_,
433 0,
434 false,
435 cache_thread.task_runner(),
436 NULL,
437 &cache,
438 cb.callback());
439 ASSERT_EQ(net::OK, cb.GetResult(rv));
440 ASSERT_TRUE(cache.get());
441 cache.reset();
443 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
444 net::CACHE_BACKEND_DEFAULT,
445 base::FilePath(), 0,
446 false, NULL, NULL, &cache,
447 cb.callback());
448 ASSERT_EQ(net::OK, cb.GetResult(rv));
449 ASSERT_TRUE(cache.get());
450 cache.reset();
451 }
453 base::MessageLoop::current()->RunUntilIdle();
456 // Tests that |BackendImpl| fails to initialize with a missing file.
457 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
458 ASSERT_TRUE(CopyTestCache("bad_entry"));
459 base::FilePath filename = cache_path_.AppendASCII("data_1");
460 base::DeleteFile(filename, false);
461 base::Thread cache_thread("CacheThread");
462 ASSERT_TRUE(cache_thread.StartWithOptions(
463 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
464 net::TestCompletionCallback cb;
466 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
467 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
468 cache_path_, cache_thread.task_runner(), NULL));
469 int rv = cache->Init(cb.callback());
470 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
471 base::ThreadRestrictions::SetIOAllowed(prev);
473 cache.reset();
474 DisableIntegrityCheck();
477 TEST_F(DiskCacheBackendTest, ExternalFiles) {
478 InitCache();
479 // First, let's create a file on the folder.
480 base::FilePath filename = cache_path_.AppendASCII("f_000001");
482 const int kSize = 50;
483 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
484 CacheTestFillBuffer(buffer1->data(), kSize, false);
485 ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize));
487 // Now let's create a file with the cache.
488 disk_cache::Entry* entry;
489 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
490 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
491 entry->Close();
493 // And verify that the first file is still there.
494 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
495 ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
496 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
499 // Tests that we deal with file-level pending operations at destruction time.
500 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
501 ASSERT_TRUE(CleanupCacheDir());
502 uint32 flags = disk_cache::kNoBuffering;
503 if (!fast)
504 flags |= disk_cache::kNoRandom;
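// The fast path leaves kNoRandom unset; see the comments on the *_Fast tests
// below for how kNoRandom interacts with the integrity check and new eviction.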
506 UseCurrentThread();
507 CreateBackend(flags, NULL);
509 net::TestCompletionCallback cb;
510 int rv = GeneratePendingIO(&cb);
512 // The cache destructor will see one pending operation here.
513 cache_.reset();
515 if (rv == net::ERR_IO_PENDING) {
516 if (fast || simple_cache_mode_)
517 EXPECT_FALSE(cb.have_result());
518 else
519 EXPECT_TRUE(cb.have_result());
520 }
522 base::MessageLoop::current()->RunUntilIdle();
524 #if !defined(OS_IOS)
525 // Wait for the actual operation to complete, or we'll keep a file handle that
526 // may cause issues later. Note that on iOS systems even though this test
527 // uses a single thread, the actual IO is posted to a worker thread and the
528 // cache destructor breaks the link to reach cb when the operation completes.
529 rv = cb.GetResult(rv);
530 #endif
533 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
534 BackendShutdownWithPendingFileIO(false);
537 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
538 // builds because they contain a lot of intentional memory leaks.
539 // The wrapper scripts used to run tests under Valgrind Memcheck will also
540 // disable these tests. See:
541 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
542 #if !defined(LEAK_SANITIZER)
543 // We'll be leaking from this test.
544 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
545 // The integrity test sets kNoRandom so there's a version mismatch if we don't
546 // force new eviction.
547 SetNewEviction();
548 BackendShutdownWithPendingFileIO(true);
550 #endif
552 // See crbug.com/330074
553 #if !defined(OS_IOS)
554 // Tests that one cache instance is not affected by another one going away.
555 TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
556 base::ScopedTempDir store;
557 ASSERT_TRUE(store.CreateUniqueTempDir());
559 net::TestCompletionCallback cb;
560 scoped_ptr<disk_cache::Backend> extra_cache;
561 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
562 net::CACHE_BACKEND_DEFAULT,
563 store.path(),
564 0,
565 false,
566 base::ThreadTaskRunnerHandle::Get(),
567 NULL,
568 &extra_cache,
569 cb.callback());
570 ASSERT_EQ(net::OK, cb.GetResult(rv));
571 ASSERT_TRUE(extra_cache.get() != NULL);
573 ASSERT_TRUE(CleanupCacheDir());
574 SetNewEviction(); // Match the expected behavior for integrity verification.
575 UseCurrentThread();
577 CreateBackend(disk_cache::kNoBuffering, NULL);
578 rv = GeneratePendingIO(&cb);
580 // cache_ has a pending operation, and extra_cache will go away.
581 extra_cache.reset();
583 if (rv == net::ERR_IO_PENDING)
584 EXPECT_FALSE(cb.have_result());
586 base::MessageLoop::current()->RunUntilIdle();
588 // Wait for the actual operation to complete, or we'll keep a file handle that
589 // may cause issues later.
590 rv = cb.GetResult(rv);
592 #endif
594 // Tests that we deal with background-thread pending operations.
595 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
596 net::TestCompletionCallback cb;
598 {
599 ASSERT_TRUE(CleanupCacheDir());
600 base::Thread cache_thread("CacheThread");
601 ASSERT_TRUE(cache_thread.StartWithOptions(
602 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
604 uint32 flags = disk_cache::kNoBuffering;
605 if (!fast)
606 flags |= disk_cache::kNoRandom;
608 CreateBackend(flags, &cache_thread);
610 disk_cache::Entry* entry;
611 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
612 ASSERT_EQ(net::OK, cb.GetResult(rv));
614 entry->Close();
616 // The cache destructor will see one pending operation here.
617 cache_.reset();
618 }
620 base::MessageLoop::current()->RunUntilIdle();
623 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
624 BackendShutdownWithPendingIO(false);
627 #if !defined(LEAK_SANITIZER)
628 // We'll be leaking from this test.
629 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
630 // The integrity test sets kNoRandom so there's a version mismatch if we don't
631 // force new eviction.
632 SetNewEviction();
633 BackendShutdownWithPendingIO(true);
635 #endif
637 // Tests that we deal with create-type pending operations.
638 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
639 net::TestCompletionCallback cb;
641 {
642 ASSERT_TRUE(CleanupCacheDir());
643 base::Thread cache_thread("CacheThread");
644 ASSERT_TRUE(cache_thread.StartWithOptions(
645 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
647 disk_cache::BackendFlags flags =
648 fast ? disk_cache::kNone : disk_cache::kNoRandom;
649 CreateBackend(flags, &cache_thread);
651 disk_cache::Entry* entry;
652 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
653 ASSERT_EQ(net::ERR_IO_PENDING, rv);
655 cache_.reset();
656 EXPECT_FALSE(cb.have_result());
657 }
659 base::MessageLoop::current()->RunUntilIdle();
662 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
663 BackendShutdownWithPendingCreate(false);
666 #if !defined(LEAK_SANITIZER)
667 // We'll be leaking an entry from this test.
668 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
669 // The integrity test sets kNoRandom so there's a version mismatch if we don't
670 // force new eviction.
671 SetNewEviction();
672 BackendShutdownWithPendingCreate(true);
674 #endif
676 // Disabled on android since this test requires cache creator to create
677 // blockfile caches.
678 #if !defined(OS_ANDROID)
679 TEST_F(DiskCacheTest, TruncatedIndex) {
680 ASSERT_TRUE(CleanupCacheDir());
681 base::FilePath index = cache_path_.AppendASCII("index");
682 ASSERT_EQ(5, base::WriteFile(index, "hello", 5));
684 base::Thread cache_thread("CacheThread");
685 ASSERT_TRUE(cache_thread.StartWithOptions(
686 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
687 net::TestCompletionCallback cb;
689 scoped_ptr<disk_cache::Backend> backend;
690 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
691 net::CACHE_BACKEND_BLOCKFILE,
692 cache_path_,
693 0,
694 false,
695 cache_thread.task_runner(),
696 NULL,
697 &backend,
698 cb.callback());
699 ASSERT_NE(net::OK, cb.GetResult(rv));
701 ASSERT_FALSE(backend);
703 #endif
705 void DiskCacheBackendTest::BackendSetSize() {
706 const int cache_size = 0x10000; // 64 kB
707 SetMaxSize(cache_size);
708 InitCache();
710 std::string first("some key");
711 std::string second("something else");
712 disk_cache::Entry* entry;
713 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
715 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
716 memset(buffer->data(), 0, cache_size);
717 EXPECT_EQ(cache_size / 10,
718 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
719 << "normal file";
721 EXPECT_EQ(net::ERR_FAILED,
722 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
723 << "file size above the limit";
725 // By doubling the total size, we make this file cacheable.
726 SetMaxSize(cache_size * 2);
727 EXPECT_EQ(cache_size / 5,
728 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
730 // Let's fill up the cache!
731 SetMaxSize(cache_size * 10);
732 EXPECT_EQ(cache_size * 3 / 4,
733 WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
734 entry->Close();
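// Stream 0 of this entry now holds 3/4 of |cache_size| and stream 1 holds
// 1/5 of it, which is where the "95% full" comment below comes from.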
735 FlushQueueForTest();
737 SetMaxSize(cache_size);
739 // The cache is 95% full.
741 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
742 EXPECT_EQ(cache_size / 10,
743 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
745 disk_cache::Entry* entry2;
746 ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
747 EXPECT_EQ(cache_size / 10,
748 WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
749 entry2->Close(); // This will trigger the cache trim.
751 EXPECT_NE(net::OK, OpenEntry(first, &entry2));
753 FlushQueueForTest(); // Make sure that we are done trimming the cache.
754 FlushQueueForTest(); // We may have posted two tasks to evict stuff.
756 entry->Close();
757 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
758 EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
759 entry->Close();
762 TEST_F(DiskCacheBackendTest, SetSize) {
763 BackendSetSize();
766 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
767 SetNewEviction();
768 BackendSetSize();
771 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
772 SetMemoryOnlyMode();
773 BackendSetSize();
776 void DiskCacheBackendTest::BackendLoad() {
777 InitCache();
778 int seed = static_cast<int>(Time::Now().ToInternalValue());
779 srand(seed);
781 disk_cache::Entry* entries[100];
782 for (int i = 0; i < 100; i++) {
783 std::string key = GenerateKey(true);
784 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
785 }
786 EXPECT_EQ(100, cache_->GetEntryCount());
788 for (int i = 0; i < 100; i++) {
789 int source1 = rand() % 100;
790 int source2 = rand() % 100;
791 disk_cache::Entry* temp = entries[source1];
792 entries[source1] = entries[source2];
793 entries[source2] = temp;
794 }
796 for (int i = 0; i < 100; i++) {
797 disk_cache::Entry* entry;
798 ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
799 EXPECT_TRUE(entry == entries[i]);
800 entry->Close();
801 entries[i]->Doom();
802 entries[i]->Close();
803 }
804 FlushQueueForTest();
805 EXPECT_EQ(0, cache_->GetEntryCount());
808 TEST_F(DiskCacheBackendTest, Load) {
809 // Work with a tiny index table (16 entries)
810 SetMask(0xf);
811 SetMaxSize(0x100000);
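// With only 16 hash buckets, the 100 entries created by BackendLoad() have to
// chain within buckets, which is the load condition this test exercises.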
812 BackendLoad();
815 TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
816 SetNewEviction();
817 // Work with a tiny index table (16 entries)
818 SetMask(0xf);
819 SetMaxSize(0x100000);
820 BackendLoad();
823 TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
824 SetMaxSize(0x100000);
825 SetMemoryOnlyMode();
826 BackendLoad();
829 TEST_F(DiskCacheBackendTest, AppCacheLoad) {
830 SetCacheType(net::APP_CACHE);
831 // Work with a tiny index table (16 entries)
832 SetMask(0xf);
833 SetMaxSize(0x100000);
834 BackendLoad();
837 TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
838 SetCacheType(net::SHADER_CACHE);
839 // Work with a tiny index table (16 entries)
840 SetMask(0xf);
841 SetMaxSize(0x100000);
842 BackendLoad();
845 // Tests the chaining of an entry to the current head.
846 void DiskCacheBackendTest::BackendChain() {
847 SetMask(0x1); // 2-entry table.
848 SetMaxSize(0x3000); // 12 kB.
849 InitCache();
851 disk_cache::Entry* entry;
852 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
853 entry->Close();
854 ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
855 entry->Close();
858 TEST_F(DiskCacheBackendTest, Chain) {
859 BackendChain();
862 TEST_F(DiskCacheBackendTest, NewEvictionChain) {
863 SetNewEviction();
864 BackendChain();
867 TEST_F(DiskCacheBackendTest, AppCacheChain) {
868 SetCacheType(net::APP_CACHE);
869 BackendChain();
872 TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
873 SetCacheType(net::SHADER_CACHE);
874 BackendChain();
877 TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
878 SetNewEviction();
879 InitCache();
881 disk_cache::Entry* entry;
882 for (int i = 0; i < 100; i++) {
883 std::string name(base::StringPrintf("Key %d", i));
884 ASSERT_EQ(net::OK, CreateEntry(name, &entry));
885 entry->Close();
886 if (i < 90) {
887 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
888 ASSERT_EQ(net::OK, OpenEntry(name, &entry));
889 entry->Close();
890 }
891 }
893 // The first eviction must come from list 1 (10% limit), the second must come
894 // from list 0.
895 TrimForTest(false);
896 EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
897 TrimForTest(false);
898 EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
900 // Double check that we still have the list tails.
901 ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
902 entry->Close();
903 ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
904 entry->Close();
907 // Before looking for invalid entries, let's check a valid entry.
908 void DiskCacheBackendTest::BackendValidEntry() {
909 InitCache();
911 std::string key("Some key");
912 disk_cache::Entry* entry;
913 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
915 const int kSize = 50;
916 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
917 memset(buffer1->data(), 0, kSize);
918 base::strlcpy(buffer1->data(), "And the data to save", kSize);
919 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
920 entry->Close();
921 SimulateCrash();
923 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
925 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
926 memset(buffer2->data(), 0, kSize);
927 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
928 entry->Close();
929 EXPECT_STREQ(buffer1->data(), buffer2->data());
932 TEST_F(DiskCacheBackendTest, ValidEntry) {
933 BackendValidEntry();
936 TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
937 SetNewEviction();
938 BackendValidEntry();
941 // The same logic of the previous test (ValidEntry), but this time force the
942 // entry to be invalid, simulating a crash in the middle.
943 // We'll be leaking memory from this test.
944 void DiskCacheBackendTest::BackendInvalidEntry() {
945 InitCache();
947 std::string key("Some key");
948 disk_cache::Entry* entry;
949 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
951 const int kSize = 50;
952 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
953 memset(buffer->data(), 0, kSize);
954 base::strlcpy(buffer->data(), "And the data to save", kSize);
955 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
956 SimulateCrash();
958 EXPECT_NE(net::OK, OpenEntry(key, &entry));
959 EXPECT_EQ(0, cache_->GetEntryCount());
962 #if !defined(LEAK_SANITIZER)
963 // We'll be leaking memory from this test.
964 TEST_F(DiskCacheBackendTest, InvalidEntry) {
965 BackendInvalidEntry();
968 // We'll be leaking memory from this test.
969 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
970 SetNewEviction();
971 BackendInvalidEntry();
974 // We'll be leaking memory from this test.
975 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
976 SetCacheType(net::APP_CACHE);
977 BackendInvalidEntry();
980 // We'll be leaking memory from this test.
981 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
982 SetCacheType(net::SHADER_CACHE);
983 BackendInvalidEntry();
986 // Almost the same test, but this time crash the cache after reading an entry.
987 // We'll be leaking memory from this test.
988 void DiskCacheBackendTest::BackendInvalidEntryRead() {
989 InitCache();
991 std::string key("Some key");
992 disk_cache::Entry* entry;
993 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
995 const int kSize = 50;
996 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
997 memset(buffer->data(), 0, kSize);
998 base::strlcpy(buffer->data(), "And the data to save", kSize);
999 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1000 entry->Close();
1001 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1002 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
1004 SimulateCrash();
1006 if (type_ == net::APP_CACHE) {
1007 // Reading an entry and crashing should not make it dirty.
1008 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1009 EXPECT_EQ(1, cache_->GetEntryCount());
1010 entry->Close();
1011 } else {
1012 EXPECT_NE(net::OK, OpenEntry(key, &entry));
1013 EXPECT_EQ(0, cache_->GetEntryCount());
1014 }
1015 }
1017 // We'll be leaking memory from this test.
1018 TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
1019 BackendInvalidEntryRead();
1022 // We'll be leaking memory from this test.
1023 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
1024 SetNewEviction();
1025 BackendInvalidEntryRead();
1028 // We'll be leaking memory from this test.
1029 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
1030 SetCacheType(net::APP_CACHE);
1031 BackendInvalidEntryRead();
1034 // We'll be leaking memory from this test.
1035 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
1036 SetCacheType(net::SHADER_CACHE);
1037 BackendInvalidEntryRead();
1040 // We'll be leaking memory from this test.
1041 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
1042 // Work with a tiny index table (16 entries)
1043 SetMask(0xf);
1044 SetMaxSize(0x100000);
1045 InitCache();
1047 int seed = static_cast<int>(Time::Now().ToInternalValue());
1048 srand(seed);
1050 const int kNumEntries = 100;
1051 disk_cache::Entry* entries[kNumEntries];
1052 for (int i = 0; i < kNumEntries; i++) {
1053 std::string key = GenerateKey(true);
1054 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
1055 }
1056 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1058 for (int i = 0; i < kNumEntries; i++) {
1059 int source1 = rand() % kNumEntries;
1060 int source2 = rand() % kNumEntries;
1061 disk_cache::Entry* temp = entries[source1];
1062 entries[source1] = entries[source2];
1063 entries[source2] = temp;
1064 }
1066 std::string keys[kNumEntries];
1067 for (int i = 0; i < kNumEntries; i++) {
1068 keys[i] = entries[i]->GetKey();
1069 if (i < kNumEntries / 2)
1070 entries[i]->Close();
1071 }
1073 SimulateCrash();
1075 for (int i = kNumEntries / 2; i < kNumEntries; i++) {
1076 disk_cache::Entry* entry;
1077 EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
1078 }
1080 for (int i = 0; i < kNumEntries / 2; i++) {
1081 disk_cache::Entry* entry;
1082 ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
1083 entry->Close();
1084 }
1086 EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
1089 // We'll be leaking memory from this test.
1090 TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
1091 BackendInvalidEntryWithLoad();
1094 // We'll be leaking memory from this test.
1095 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
1096 SetNewEviction();
1097 BackendInvalidEntryWithLoad();
1100 // We'll be leaking memory from this test.
1101 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
1102 SetCacheType(net::APP_CACHE);
1103 BackendInvalidEntryWithLoad();
1106 // We'll be leaking memory from this test.
1107 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
1108 SetCacheType(net::SHADER_CACHE);
1109 BackendInvalidEntryWithLoad();
1112 // We'll be leaking memory from this test.
1113 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1114 const int kSize = 0x3000; // 12 kB
1115 SetMaxSize(kSize * 10);
1116 InitCache();
1118 std::string first("some key");
1119 std::string second("something else");
1120 disk_cache::Entry* entry;
1121 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
1123 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1124 memset(buffer->data(), 0, kSize);
1125 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1127 // Simulate a crash.
1128 SimulateCrash();
1130 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
1131 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1133 EXPECT_EQ(2, cache_->GetEntryCount());
1134 SetMaxSize(kSize);
1135 entry->Close(); // Trim the cache.
1136 FlushQueueForTest();
1138 // If we evicted the entry in less than 20 ms, we have one entry in the cache;
1139 // if it took more than that, we posted a task and we'll delete the second
1140 // entry too.
1141 base::MessageLoop::current()->RunUntilIdle();
1143 // This may not be thread-safe in general, but for now it's OK, so add some
1144 // ThreadSanitizer annotations to ignore data races on cache_.
1145 // See http://crbug.com/55970
1146 ANNOTATE_IGNORE_READS_BEGIN();
1147 EXPECT_GE(1, cache_->GetEntryCount());
1148 ANNOTATE_IGNORE_READS_END();
1150 EXPECT_NE(net::OK, OpenEntry(first, &entry));
1153 // We'll be leaking memory from this test.
1154 TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
1155 BackendTrimInvalidEntry();
1158 // We'll be leaking memory from this test.
1159 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
1160 SetNewEviction();
1161 BackendTrimInvalidEntry();
1164 // We'll be leaking memory from this test.
1165 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1166 SetMask(0xf); // 16-entry table.
1168 const int kSize = 0x3000; // 12 kB
1169 SetMaxSize(kSize * 40);
1170 InitCache();
1172 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1173 memset(buffer->data(), 0, kSize);
1174 disk_cache::Entry* entry;
1176 // Writing 32 entries to this cache chains most of them.
1177 for (int i = 0; i < 32; i++) {
1178 std::string key(base::StringPrintf("some key %d", i));
1179 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1180 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1181 entry->Close();
1182 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1183 // Note that we are not closing the entries.
1186 // Simulate a crash.
1187 SimulateCrash();
1189 ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
1190 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1192 FlushQueueForTest();
1193 EXPECT_EQ(33, cache_->GetEntryCount());
1194 SetMaxSize(kSize);
1196 // For the new eviction code, all corrupt entries are on the second list so
1197 // they are not going away that easy.
1198 if (new_eviction_) {
1199 EXPECT_EQ(net::OK, DoomAllEntries());
1200 }
1202 entry->Close(); // Trim the cache.
1203 FlushQueueForTest();
1205 // We may abort the eviction before cleaning up everything.
1206 base::MessageLoop::current()->RunUntilIdle();
1207 FlushQueueForTest();
1208 // To be clear: eviction tasks may still be running at this point, so the
1209 // number of entries can change while we read it.
1210 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1211 EXPECT_GE(30, cache_->GetEntryCount());
1212 ANNOTATE_IGNORE_READS_AND_WRITES_END();
1215 // We'll be leaking memory from this test.
1216 TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
1217 BackendTrimInvalidEntry2();
1220 // We'll be leaking memory from this test.
1221 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
1222 SetNewEviction();
1223 BackendTrimInvalidEntry2();
1225 #endif // !defined(LEAK_SANITIZER)
1227 void DiskCacheBackendTest::BackendEnumerations() {
1228 InitCache();
1229 Time initial = Time::Now();
1231 const int kNumEntries = 100;
1232 for (int i = 0; i < kNumEntries; i++) {
1233 std::string key = GenerateKey(true);
1234 disk_cache::Entry* entry;
1235 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1236 entry->Close();
1237 }
1238 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1239 Time final = Time::Now();
1241 disk_cache::Entry* entry;
1242 void* iter = NULL;
1243 int count = 0;
1244 Time last_modified[kNumEntries];
1245 Time last_used[kNumEntries];
1246 while (OpenNextEntry(&iter, &entry) == net::OK) {
1247 ASSERT_TRUE(NULL != entry);
1248 if (count < kNumEntries) {
1249 last_modified[count] = entry->GetLastModified();
1250 last_used[count] = entry->GetLastUsed();
1251 EXPECT_TRUE(initial <= last_modified[count]);
1252 EXPECT_TRUE(final >= last_modified[count]);
1253 }
1255 entry->Close();
1256 count++;
1257 }
1258 EXPECT_EQ(kNumEntries, count);
1260 iter = NULL;
1261 count = 0;
1262 // The previous enumeration should not have changed the timestamps.
1263 while (OpenNextEntry(&iter, &entry) == net::OK) {
1264 ASSERT_TRUE(NULL != entry);
1265 if (count < kNumEntries) {
1266 EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1267 EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1268 }
1269 entry->Close();
1270 count++;
1271 }
1272 EXPECT_EQ(kNumEntries, count);
1275 TEST_F(DiskCacheBackendTest, Enumerations) {
1276 BackendEnumerations();
1279 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
1280 SetNewEviction();
1281 BackendEnumerations();
1284 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
1285 SetMemoryOnlyMode();
1286 BackendEnumerations();
1289 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
1290 SetCacheType(net::SHADER_CACHE);
1291 BackendEnumerations();
1294 TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
1295 SetCacheType(net::APP_CACHE);
1296 BackendEnumerations();
1299 // Verifies enumerations while entries are open.
1300 void DiskCacheBackendTest::BackendEnumerations2() {
1301 InitCache();
1302 const std::string first("first");
1303 const std::string second("second");
1304 disk_cache::Entry *entry1, *entry2;
1305 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1306 entry1->Close();
1307 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1308 entry2->Close();
1309 FlushQueueForTest();
1311 // Make sure that the timestamp is not the same.
1312 AddDelay();
1313 ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
1314 void* iter = NULL;
1315 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1316 EXPECT_EQ(entry2->GetKey(), second);
1318 // Two entries and the iterator pointing at "first".
1319 entry1->Close();
1320 entry2->Close();
1322 // The iterator should still be valid, so we should not crash.
1323 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1324 EXPECT_EQ(entry2->GetKey(), first);
1325 entry2->Close();
1326 cache_->EndEnumeration(&iter);
1328 // Modify the oldest entry and get the newest element.
1329 ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
1330 EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
1331 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1332 if (type_ == net::APP_CACHE) {
1333 // The list is not updated.
1334 EXPECT_EQ(entry2->GetKey(), second);
1335 } else {
1336 EXPECT_EQ(entry2->GetKey(), first);
1337 }
1339 entry1->Close();
1340 entry2->Close();
1341 cache_->EndEnumeration(&iter);
1344 TEST_F(DiskCacheBackendTest, Enumerations2) {
1345 BackendEnumerations2();
1348 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
1349 SetNewEviction();
1350 BackendEnumerations2();
1353 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
1354 SetMemoryOnlyMode();
1355 BackendEnumerations2();
1358 TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
1359 SetCacheType(net::APP_CACHE);
1360 BackendEnumerations2();
1363 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
1364 SetCacheType(net::SHADER_CACHE);
1365 BackendEnumerations2();
1368 // Verify that ReadData calls do not update the LRU cache
1369 // when using the SHADER_CACHE type.
1370 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
1371 SetCacheType(net::SHADER_CACHE);
1372 InitCache();
1373 const std::string first("first");
1374 const std::string second("second");
1375 disk_cache::Entry *entry1, *entry2;
1376 const int kSize = 50;
1377 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1379 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1380 memset(buffer1->data(), 0, kSize);
1381 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1382 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1384 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1385 entry2->Close();
1387 FlushQueueForTest();
1389 // Make sure that the timestamp is not the same.
1390 AddDelay();
1392 // Read from the last item in the LRU.
1393 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1394 entry1->Close();
1396 void* iter = NULL;
1397 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
1398 EXPECT_EQ(entry2->GetKey(), second);
1399 entry2->Close();
1400 cache_->EndEnumeration(&iter);
1403 #if !defined(LEAK_SANITIZER)
1404 // Verify handling of invalid entries while doing enumerations.
1405 // We'll be leaking memory from this test.
1406 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1407 InitCache();
1409 std::string key("Some key");
1410 disk_cache::Entry *entry, *entry1, *entry2;
1411 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
1413 const int kSize = 50;
1414 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1415 memset(buffer1->data(), 0, kSize);
1416 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1417 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1418 entry1->Close();
1419 ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
1420 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1422 std::string key2("Another key");
1423 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
1424 entry2->Close();
1425 ASSERT_EQ(2, cache_->GetEntryCount());
1427 SimulateCrash();
1429 void* iter = NULL;
1430 int count = 0;
1431 while (OpenNextEntry(&iter, &entry) == net::OK) {
1432 ASSERT_TRUE(NULL != entry);
1433 EXPECT_EQ(key2, entry->GetKey());
1434 entry->Close();
1435 count++;
1436 }
1437 EXPECT_EQ(1, count);
1438 EXPECT_EQ(1, cache_->GetEntryCount());
1441 // We'll be leaking memory from this test.
1442 TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
1443 BackendInvalidEntryEnumeration();
1446 // We'll be leaking memory from this test.
1447 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
1448 SetNewEviction();
1449 BackendInvalidEntryEnumeration();
1451 #endif // !defined(LEAK_SANITIZER)
1453 // Tests that if for some reason entries are modified close to existing cache
1454 // iterators, we don't generate fatal errors or reset the cache.
1455 void DiskCacheBackendTest::BackendFixEnumerators() {
1456 InitCache();
1458 int seed = static_cast<int>(Time::Now().ToInternalValue());
1459 srand(seed);
1461 const int kNumEntries = 10;
1462 for (int i = 0; i < kNumEntries; i++) {
1463 std::string key = GenerateKey(true);
1464 disk_cache::Entry* entry;
1465 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1466 entry->Close();
1467 }
1468 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1470 disk_cache::Entry *entry1, *entry2;
1471 void* iter1 = NULL;
1472 void* iter2 = NULL;
1473 ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
1474 ASSERT_TRUE(NULL != entry1);
1475 entry1->Close();
1476 entry1 = NULL;
1478 // Let's go to the middle of the list.
1479 for (int i = 0; i < kNumEntries / 2; i++) {
1480 if (entry1)
1481 entry1->Close();
1482 ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
1483 ASSERT_TRUE(NULL != entry1);
1485 ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1486 ASSERT_TRUE(NULL != entry2);
1487 entry2->Close();
1488 }
1490 // Messing up with entry1 will modify entry2->next.
1491 entry1->Doom();
1492 ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1493 ASSERT_TRUE(NULL != entry2);
1495 // The link entry2->entry1 should be broken.
1496 EXPECT_NE(entry2->GetKey(), entry1->GetKey());
1497 entry1->Close();
1498 entry2->Close();
1500 // And the second iterator should keep working.
1501 ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
1502 ASSERT_TRUE(NULL != entry2);
1503 entry2->Close();
1505 cache_->EndEnumeration(&iter1);
1506 cache_->EndEnumeration(&iter2);
1509 TEST_F(DiskCacheBackendTest, FixEnumerators) {
1510 BackendFixEnumerators();
1513 TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
1514 SetNewEviction();
1515 BackendFixEnumerators();
1518 void DiskCacheBackendTest::BackendDoomRecent() {
1519 InitCache();
1521 disk_cache::Entry *entry;
1522 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1523 entry->Close();
1524 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1525 entry->Close();
1526 FlushQueueForTest();
1528 AddDelay();
1529 Time middle = Time::Now();
1531 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1532 entry->Close();
1533 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1534 entry->Close();
1535 FlushQueueForTest();
1537 AddDelay();
1538 Time final = Time::Now();
1540 ASSERT_EQ(4, cache_->GetEntryCount());
1541 EXPECT_EQ(net::OK, DoomEntriesSince(final));
1542 ASSERT_EQ(4, cache_->GetEntryCount());
1544 EXPECT_EQ(net::OK, DoomEntriesSince(middle));
1545 ASSERT_EQ(2, cache_->GetEntryCount());
1547 ASSERT_EQ(net::OK, OpenEntry("second", &entry));
1548 entry->Close();
1551 TEST_F(DiskCacheBackendTest, DoomRecent) {
1552 BackendDoomRecent();
1555 TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
1556 SetNewEviction();
1557 BackendDoomRecent();
1560 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
1561 SetMemoryOnlyMode();
1562 BackendDoomRecent();
1565 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
1566 SetMemoryOnlyMode();
1567 base::Time start;
1568 InitSparseCache(&start, NULL);
1569 DoomEntriesSince(start);
1570 EXPECT_EQ(1, cache_->GetEntryCount());
1573 TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
1574 base::Time start;
1575 InitSparseCache(&start, NULL);
1576 DoomEntriesSince(start);
1577 // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
1578 // MemBackendImpl does not. That's why the expected value differs here from
1579 // MemoryOnlyDoomEntriesSinceSparse.
1580 EXPECT_EQ(3, cache_->GetEntryCount());
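// Only "zeroth" survives: one parent entry plus its two sparse children.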
1583 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
1584 SetMemoryOnlyMode();
1585 InitSparseCache(NULL, NULL);
1586 EXPECT_EQ(net::OK, DoomAllEntries());
1587 EXPECT_EQ(0, cache_->GetEntryCount());
1590 TEST_F(DiskCacheBackendTest, DoomAllSparse) {
1591 InitSparseCache(NULL, NULL);
1592 EXPECT_EQ(net::OK, DoomAllEntries());
1593 EXPECT_EQ(0, cache_->GetEntryCount());
1596 void DiskCacheBackendTest::BackendDoomBetween() {
1597 InitCache();
1599 disk_cache::Entry *entry;
1600 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1601 entry->Close();
1602 FlushQueueForTest();
1604 AddDelay();
1605 Time middle_start = Time::Now();
1607 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1608 entry->Close();
1609 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1610 entry->Close();
1611 FlushQueueForTest();
1613 AddDelay();
1614 Time middle_end = Time::Now();
1616 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1617 entry->Close();
1618 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1619 entry->Close();
1620 FlushQueueForTest();
1622 AddDelay();
1623 Time final = Time::Now();
1625 ASSERT_EQ(4, cache_->GetEntryCount());
1626 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
1627 ASSERT_EQ(2, cache_->GetEntryCount());
1629 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1630 entry->Close();
1632 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
1633 ASSERT_EQ(1, cache_->GetEntryCount());
1635 ASSERT_EQ(net::OK, OpenEntry("first", &entry));
1636 entry->Close();
1639 TEST_F(DiskCacheBackendTest, DoomBetween) {
1640 BackendDoomBetween();
1643 TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
1644 SetNewEviction();
1645 BackendDoomBetween();
1648 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
1649 SetMemoryOnlyMode();
1650 BackendDoomBetween();
1653 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
1654 SetMemoryOnlyMode();
1655 base::Time start, end;
1656 InitSparseCache(&start, &end);
1657 DoomEntriesBetween(start, end);
1658 EXPECT_EQ(3, cache_->GetEntryCount());
1660 start = end;
1661 end = base::Time::Now();
1662 DoomEntriesBetween(start, end);
1663 EXPECT_EQ(1, cache_->GetEntryCount());
1666 TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
1667 base::Time start, end;
1668 InitSparseCache(&start, &end);
1669 DoomEntriesBetween(start, end);
1670 EXPECT_EQ(9, cache_->GetEntryCount());
1672 start = end;
1673 end = base::Time::Now();
1674 DoomEntriesBetween(start, end);
1675 EXPECT_EQ(3, cache_->GetEntryCount());
1678 void DiskCacheBackendTest::BackendTransaction(const std::string& name,
1679 int num_entries, bool load) {
1680 success_ = false;
1681 ASSERT_TRUE(CopyTestCache(name));
1682 DisableFirstCleanup();
1684 uint32 mask;
1685 if (load) {
1686 mask = 0xf;
1687 SetMaxSize(0x100000);
1688 } else {
1689 // Clear the settings from the previous run.
1690 mask = 0;
1691 SetMaxSize(0);
1692 }
1693 SetMask(mask);
1695 InitCache();
1696 ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());
1698 std::string key("the first key");
1699 disk_cache::Entry* entry1;
1700 ASSERT_NE(net::OK, OpenEntry(key, &entry1));
1702 int actual = cache_->GetEntryCount();
1703 if (num_entries != actual) {
1704 ASSERT_TRUE(load);
1705 // If there is a heavy load, inserting an entry will make another entry
1706 // dirty (on the hash bucket) so two entries are removed.
1707 ASSERT_EQ(num_entries - 1, actual);
1708 }
1710 cache_.reset();
1711 cache_impl_ = NULL;
1713 ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
1714 success_ = true;
1717 void DiskCacheBackendTest::BackendRecoverInsert() {
1718 // Tests with an empty cache.
1719 BackendTransaction("insert_empty1", 0, false);
1720 ASSERT_TRUE(success_) << "insert_empty1";
1721 BackendTransaction("insert_empty2", 0, false);
1722 ASSERT_TRUE(success_) << "insert_empty2";
1723 BackendTransaction("insert_empty3", 0, false);
1724 ASSERT_TRUE(success_) << "insert_empty3";
1726 // Tests with one entry on the cache.
1727 BackendTransaction("insert_one1", 1, false);
1728 ASSERT_TRUE(success_) << "insert_one1";
1729 BackendTransaction("insert_one2", 1, false);
1730 ASSERT_TRUE(success_) << "insert_one2";
1731 BackendTransaction("insert_one3", 1, false);
1732 ASSERT_TRUE(success_) << "insert_one3";
1734 // Tests with one hundred entries on the cache, tiny index.
1735 BackendTransaction("insert_load1", 100, true);
1736 ASSERT_TRUE(success_) << "insert_load1";
1737 BackendTransaction("insert_load2", 100, true);
1738 ASSERT_TRUE(success_) << "insert_load2";
1741 TEST_F(DiskCacheBackendTest, RecoverInsert) {
1742 BackendRecoverInsert();
1745 TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
1746 SetNewEviction();
1747 BackendRecoverInsert();
1750 void DiskCacheBackendTest::BackendRecoverRemove() {
1751 // Removing the only element.
1752 BackendTransaction("remove_one1", 0, false);
1753 ASSERT_TRUE(success_) << "remove_one1";
1754 BackendTransaction("remove_one2", 0, false);
1755 ASSERT_TRUE(success_) << "remove_one2";
1756 BackendTransaction("remove_one3", 0, false);
1757 ASSERT_TRUE(success_) << "remove_one3";
1759 // Removing the head.
1760 BackendTransaction("remove_head1", 1, false);
1761 ASSERT_TRUE(success_) << "remove_head1";
1762 BackendTransaction("remove_head2", 1, false);
1763 ASSERT_TRUE(success_) << "remove_head2";
1764 BackendTransaction("remove_head3", 1, false);
1765 ASSERT_TRUE(success_) << "remove_head3";
1767 // Removing the tail.
1768 BackendTransaction("remove_tail1", 1, false);
1769 ASSERT_TRUE(success_) << "remove_tail1";
1770 BackendTransaction("remove_tail2", 1, false);
1771 ASSERT_TRUE(success_) << "remove_tail2";
1772 BackendTransaction("remove_tail3", 1, false);
1773 ASSERT_TRUE(success_) << "remove_tail3";
1775 // Removing with one hundred entries on the cache, tiny index.
1776 BackendTransaction("remove_load1", 100, true);
1777 ASSERT_TRUE(success_) << "remove_load1";
1778 BackendTransaction("remove_load2", 100, true);
1779 ASSERT_TRUE(success_) << "remove_load2";
1780 BackendTransaction("remove_load3", 100, true);
1781 ASSERT_TRUE(success_) << "remove_load3";
1783 // This case cannot be reverted.
1784 BackendTransaction("remove_one4", 0, false);
1785 ASSERT_TRUE(success_) << "remove_one4";
1786 BackendTransaction("remove_head4", 1, false);
1787 ASSERT_TRUE(success_) << "remove_head4";
1790 #if defined(OS_WIN)
1791 // http://crbug.com/396392
1792 #define MAYBE_RecoverRemove DISABLED_RecoverRemove
1793 #else
1794 #define MAYBE_RecoverRemove RecoverRemove
1795 #endif
1796 TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
1797 BackendRecoverRemove();
1800 #if defined(OS_WIN)
1801 // http://crbug.com/396392
1802 #define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
1803 #else
1804 #define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
1805 #endif
1806 TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
1807 SetNewEviction();
1808 BackendRecoverRemove();
1811 void DiskCacheBackendTest::BackendRecoverWithEviction() {
1812 success_ = false;
1813 ASSERT_TRUE(CopyTestCache("insert_load1"));
1814 DisableFirstCleanup();
1816 SetMask(0xf);
1817 SetMaxSize(0x1000);
1819 // We should not crash here.
1820 InitCache();
1821 DisableIntegrityCheck();
1824 TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
1825 BackendRecoverWithEviction();
1828 TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
1829 SetNewEviction();
1830 BackendRecoverWithEviction();
1833 // Tests that the |BackendImpl| fails to start with the wrong cache version.
1834 TEST_F(DiskCacheTest, WrongVersion) {
1835 ASSERT_TRUE(CopyTestCache("wrong_version"));
1836 base::Thread cache_thread("CacheThread");
1837 ASSERT_TRUE(cache_thread.StartWithOptions(
1838 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1839 net::TestCompletionCallback cb;
1841 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1842 cache_path_, cache_thread.task_runner(), NULL));
1843 int rv = cache->Init(cb.callback());
1844 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
1847 class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
1848 public:
1849 virtual ~BadEntropyProvider() {}
1851 virtual double GetEntropyForTrial(const std::string& trial_name,
1852 uint32 randomization_seed) const OVERRIDE {
1853 return 0.5;
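// Returning a constant entropy value makes field trial group assignment
// deterministic, which lets the tests below force themselves into a specific
// SimpleCacheTrial group.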
1857 // Tests that the disk cache successfully joins the control group, dropping the
1858 // existing cache in favor of a new empty cache.
1859 // Disabled on Android since this test requires the cache creator to create
1860 // blockfile caches.
1861 #if !defined(OS_ANDROID)
1862 TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
1863 base::Thread cache_thread("CacheThread");
1864 ASSERT_TRUE(cache_thread.StartWithOptions(
1865 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1867 scoped_ptr<disk_cache::BackendImpl> cache =
1868 CreateExistingEntryCache(cache_thread, cache_path_);
1869 ASSERT_TRUE(cache.get());
1870 cache.reset();
1872 // Instantiate the SimpleCacheTrial, forcing this run into the
1873 // ExperimentControl group.
1874 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1875 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1876 "ExperimentControl");
1877 net::TestCompletionCallback cb;
1878 scoped_ptr<disk_cache::Backend> base_cache;
1879 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1880 net::CACHE_BACKEND_BLOCKFILE,
1881 cache_path_,
1883 true,
1884 cache_thread.task_runner(),
1885 NULL,
1886 &base_cache,
1887 cb.callback());
1888 ASSERT_EQ(net::OK, cb.GetResult(rv));
1889 EXPECT_EQ(0, base_cache->GetEntryCount());
1891 #endif
1893 // Tests that the disk cache can restart in the control group preserving
1894 // existing entries.
1895 TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
1896 // Instantiate the SimpleCacheTrial, forcing this run into the
1897 // ExperimentControl group.
1898 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1899 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1900 "ExperimentControl");
1902 base::Thread cache_thread("CacheThread");
1903 ASSERT_TRUE(cache_thread.StartWithOptions(
1904 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1906 scoped_ptr<disk_cache::BackendImpl> cache =
1907 CreateExistingEntryCache(cache_thread, cache_path_);
1908 ASSERT_TRUE(cache.get());
1910 net::TestCompletionCallback cb;
1912 const int kRestartCount = 5;
1913 for (int i = 0; i < kRestartCount; ++i) {
1914 cache.reset(new disk_cache::BackendImpl(
1915 cache_path_, cache_thread.message_loop_proxy(), NULL));
1916 int rv = cache->Init(cb.callback());
1917 ASSERT_EQ(net::OK, cb.GetResult(rv));
1918 EXPECT_EQ(1, cache->GetEntryCount());
1920 disk_cache::Entry* entry = NULL;
1921 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1922 EXPECT_EQ(net::OK, cb.GetResult(rv));
1923 EXPECT_TRUE(entry);
1924 entry->Close();
1928 // Tests that the disk cache can leave the control group preserving existing
1929 // entries.
1930 TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
1931 base::Thread cache_thread("CacheThread");
1932 ASSERT_TRUE(cache_thread.StartWithOptions(
1933 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1936 // Instantiate the SimpleCacheTrial, forcing this run into the
1937 // ExperimentControl group.
1938 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1939 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1940 "ExperimentControl");
1942 scoped_ptr<disk_cache::BackendImpl> cache =
1943 CreateExistingEntryCache(cache_thread, cache_path_);
1944 ASSERT_TRUE(cache.get());
1947 // Instantiate the SimpleCacheTrial, forcing this run into the
1948 // ExperimentNo group.
1949 base::FieldTrialList field_trial_list(new BadEntropyProvider());
1950 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1951 net::TestCompletionCallback cb;
1953 const int kRestartCount = 5;
1954 for (int i = 0; i < kRestartCount; ++i) {
1955 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1956 cache_path_, cache_thread.message_loop_proxy(), NULL));
1957 int rv = cache->Init(cb.callback());
1958 ASSERT_EQ(net::OK, cb.GetResult(rv));
1959 EXPECT_EQ(1, cache->GetEntryCount());
1961 disk_cache::Entry* entry = NULL;
1962 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1963 EXPECT_EQ(net::OK, cb.GetResult(rv));
1964 EXPECT_TRUE(entry);
1965 entry->Close();
1969 // Tests that the cache is properly restarted on recovery error.
1970 // Disabled on Android since this test requires the cache creator to create
1971 // blockfile caches.
1972 #if !defined(OS_ANDROID)
1973 TEST_F(DiskCacheBackendTest, DeleteOld) {
1974 ASSERT_TRUE(CopyTestCache("wrong_version"));
1975 SetNewEviction();
1976 base::Thread cache_thread("CacheThread");
1977 ASSERT_TRUE(cache_thread.StartWithOptions(
1978 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1980 net::TestCompletionCallback cb;
1981 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
1982 base::FilePath path(cache_path_);
1983 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1984 net::CACHE_BACKEND_BLOCKFILE,
1985 path,
1987 true,
1988 cache_thread.task_runner(),
1989 NULL,
1990 &cache_,
1991 cb.callback());
1992 path.clear(); // Make sure path was captured by the previous call.
1993 ASSERT_EQ(net::OK, cb.GetResult(rv));
1994 base::ThreadRestrictions::SetIOAllowed(prev);
1995 cache_.reset();
1996 EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
1998 #endif
2000 // We want to be able to deal with messed up entries on disk.
2001 void DiskCacheBackendTest::BackendInvalidEntry2() {
2002 ASSERT_TRUE(CopyTestCache("bad_entry"));
2003 DisableFirstCleanup();
2004 InitCache();
2006 disk_cache::Entry *entry1, *entry2;
2007 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
2008 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
2009 entry1->Close();
2011 // CheckCacheIntegrity will fail at this point.
2012 DisableIntegrityCheck();
2015 TEST_F(DiskCacheBackendTest, InvalidEntry2) {
2016 BackendInvalidEntry2();
2019 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
2020 SetNewEviction();
2021 BackendInvalidEntry2();
2024 // Tests that we don't crash or hang when enumerating this cache.
2025 void DiskCacheBackendTest::BackendInvalidEntry3() {
2026 SetMask(0x1); // 2-entry table.
2027 SetMaxSize(0x3000); // 12 kB.
2028 DisableFirstCleanup();
2029 InitCache();
2031 disk_cache::Entry* entry;
2032 void* iter = NULL;
2033 while (OpenNextEntry(&iter, &entry) == net::OK) {
2034 entry->Close();
2038 TEST_F(DiskCacheBackendTest, InvalidEntry3) {
2039 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2040 BackendInvalidEntry3();
2043 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
2044 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2045 SetNewEviction();
2046 BackendInvalidEntry3();
2047 DisableIntegrityCheck();
2050 // Test that we handle a dirty entry on the LRU list, already replaced with
2051 // the same key, and with hash collisions.
2052 TEST_F(DiskCacheBackendTest, InvalidEntry4) {
2053 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2054 SetMask(0x1); // 2-entry table.
2055 SetMaxSize(0x3000); // 12 kB.
2056 DisableFirstCleanup();
2057 InitCache();
2059 TrimForTest(false);
2062 // Test that we handle a dirty entry on the deleted list, already replaced with
2063 // the same key, and with hash collisions.
2064 TEST_F(DiskCacheBackendTest, InvalidEntry5) {
2065 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2066 SetNewEviction();
2067 SetMask(0x1); // 2-entry table.
2068 SetMaxSize(0x3000); // 12 kB.
2069 DisableFirstCleanup();
2070 InitCache();
2072 TrimDeletedListForTest(false);
2075 TEST_F(DiskCacheBackendTest, InvalidEntry6) {
2076 ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2077 SetMask(0x1); // 2-entry table.
2078 SetMaxSize(0x3000); // 12 kB.
2079 DisableFirstCleanup();
2080 InitCache();
2082 // There is a dirty entry (but marked as clean) at the end, pointing to a
2083 // deleted entry through the hash collision list. We should not re-insert the
2084 // deleted entry into the index table.
2086 TrimForTest(false);
2087 // The cache should be clean (as detected by CheckCacheIntegrity).
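// (CheckCacheIntegrity presumably runs when the fixture tears the cache down,
// which is why no explicit assertion is needed here.)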
2090 // Tests that we don't hang when there is a loop on the hash collision list.
2091 // The test cache could be a result of bug 69135.
2092 TEST_F(DiskCacheBackendTest, BadNextEntry1) {
2093 ASSERT_TRUE(CopyTestCache("list_loop2"));
2094 SetMask(0x1); // 2-entry table.
2095 SetMaxSize(0x3000); // 12 kB.
2096 DisableFirstCleanup();
2097 InitCache();
2099 // The second entry points at itself, and the first entry is not accessible
2100 // through the index, but it is at the head of the LRU.
2102 disk_cache::Entry* entry;
2103 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
2104 entry->Close();
2106 TrimForTest(false);
2107 TrimForTest(false);
2108 ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
2109 entry->Close();
2110 EXPECT_EQ(1, cache_->GetEntryCount());
2113 // Tests that we don't hang when there is a loop on the hash collision list.
2114 // The test cache could be a result of bug 69135.
2115 TEST_F(DiskCacheBackendTest, BadNextEntry2) {
2116 ASSERT_TRUE(CopyTestCache("list_loop3"));
2117 SetMask(0x1); // 2-entry table.
2118 SetMaxSize(0x3000); // 12 kB.
2119 DisableFirstCleanup();
2120 InitCache();
2122 // There is a wide loop of 5 entries.
2124 disk_cache::Entry* entry;
2125 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
2128 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2129 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2130 DisableFirstCleanup();
2131 SetNewEviction();
2132 InitCache();
2134 // The second entry is dirty, but removing it should not corrupt the list.
2135 disk_cache::Entry* entry;
2136 ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2137 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2139 // This should not delete the cache.
2140 entry->Doom();
2141 FlushQueueForTest();
2142 entry->Close();
2144 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2145 entry->Close();
2148 // Tests handling of corrupt entries by keeping the rankings node around, with
2149 // a fatal failure.
2150 void DiskCacheBackendTest::BackendInvalidEntry7() {
2151 const int kSize = 0x3000; // 12 kB.
2152 SetMaxSize(kSize * 10);
2153 InitCache();
2155 std::string first("some key");
2156 std::string second("something else");
2157 disk_cache::Entry* entry;
2158 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2159 entry->Close();
2160 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2162 // Corrupt this entry.
2163 disk_cache::EntryImpl* entry_impl =
2164 static_cast<disk_cache::EntryImpl*>(entry);
2166 entry_impl->rankings()->Data()->next = 0;
2167 entry_impl->rankings()->Store();
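// Zeroing |next| leaves the rankings node pointing nowhere; when the backend
// later walks the LRU list it should treat this as a fatal corruption, which
// is why the enumeration below ends up wiping the whole cache.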
2168 entry->Close();
2169 FlushQueueForTest();
2170 EXPECT_EQ(2, cache_->GetEntryCount());
2172 // This should detect the bad entry.
2173 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2174 EXPECT_EQ(1, cache_->GetEntryCount());
2176 // We should delete the cache. The list still has a corrupt node.
2177 void* iter = NULL;
2178 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2179 FlushQueueForTest();
2180 EXPECT_EQ(0, cache_->GetEntryCount());
2183 TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2184 BackendInvalidEntry7();
2187 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2188 SetNewEviction();
2189 BackendInvalidEntry7();
2192 // Tests handling of corrupt entries by keeping the rankings node around, with
2193 // a non-fatal failure.
2194 void DiskCacheBackendTest::BackendInvalidEntry8() {
2195 const int kSize = 0x3000; // 12 kB
2196 SetMaxSize(kSize * 10);
2197 InitCache();
2199 std::string first("some key");
2200 std::string second("something else");
2201 disk_cache::Entry* entry;
2202 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2203 entry->Close();
2204 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2206 // Corrupt this entry.
2207 disk_cache::EntryImpl* entry_impl =
2208 static_cast<disk_cache::EntryImpl*>(entry);
2210 entry_impl->rankings()->Data()->contents = 0;
2211 entry_impl->rankings()->Store();
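// Clearing |contents| detaches the rankings node from its entry data. Unlike
// the |next| corruption above, this should be recoverable: the backend drops
// just this entry and keeps the rest of the cache (see the checks below).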
2212 entry->Close();
2213 FlushQueueForTest();
2214 EXPECT_EQ(2, cache_->GetEntryCount());
2216 // This should detect the bad entry.
2217 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2218 EXPECT_EQ(1, cache_->GetEntryCount());
2220 // We should not delete the cache.
2221 void* iter = NULL;
2222 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2223 entry->Close();
2224 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2225 EXPECT_EQ(1, cache_->GetEntryCount());
2228 TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2229 BackendInvalidEntry8();
2232 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2233 SetNewEviction();
2234 BackendInvalidEntry8();
2237 // Tests handling of corrupt entries detected by enumerations. Note that these
2238 // tests (xx9 to xx11) are basically just going through slightly different
2239 // codepaths, so they are tightly coupled with the code, but that is better
2240 // than not testing the error-handling code at all.
2241 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2242 const int kSize = 0x3000; // 12 kB.
2243 SetMaxSize(kSize * 10);
2244 InitCache();
2246 std::string first("some key");
2247 std::string second("something else");
2248 disk_cache::Entry* entry;
2249 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2250 entry->Close();
2251 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2253 // Corrupt this entry.
2254 disk_cache::EntryImpl* entry_impl =
2255 static_cast<disk_cache::EntryImpl*>(entry);
2257 entry_impl->entry()->Data()->state = 0xbad;
2258 entry_impl->entry()->Store();
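// 0xbad is not a valid on-disk entry state, so enumeration and eviction
// should treat this entry as dirty/corrupt; the branches below verify that it
// is skipped or dropped without taking the rest of the cache with it.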
2259 entry->Close();
2260 FlushQueueForTest();
2261 EXPECT_EQ(2, cache_->GetEntryCount());
2263 if (eviction) {
2264 TrimForTest(false);
2265 EXPECT_EQ(1, cache_->GetEntryCount());
2266 TrimForTest(false);
2267 EXPECT_EQ(1, cache_->GetEntryCount());
2268 } else {
2269 // We should detect the problem through the list, but we should not delete
2270 // the entry, just fail the iteration.
2271 void* iter = NULL;
2272 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2274 // Now a full iteration will work, and return one entry.
2275 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2276 entry->Close();
2277 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2279 // This should detect what's left of the bad entry.
2280 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2281 EXPECT_EQ(2, cache_->GetEntryCount());
2283 DisableIntegrityCheck();
2286 TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2287 BackendInvalidEntry9(false);
2290 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2291 SetNewEviction();
2292 BackendInvalidEntry9(false);
2295 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2296 BackendInvalidEntry9(true);
2299 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2300 SetNewEviction();
2301 BackendInvalidEntry9(true);
2304 // Tests handling of corrupt entries detected by enumerations.
2305 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2306 const int kSize = 0x3000; // 12 kB.
2307 SetMaxSize(kSize * 10);
2308 SetNewEviction();
2309 InitCache();
2311 std::string first("some key");
2312 std::string second("something else");
2313 disk_cache::Entry* entry;
2314 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2315 entry->Close();
2316 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2317 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2318 entry->Close();
2319 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2321 // Corrupt this entry.
2322 disk_cache::EntryImpl* entry_impl =
2323 static_cast<disk_cache::EntryImpl*>(entry);
2325 entry_impl->entry()->Data()->state = 0xbad;
2326 entry_impl->entry()->Store();
2327 entry->Close();
2328 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2329 entry->Close();
2330 EXPECT_EQ(3, cache_->GetEntryCount());
2332 // We have:
2333 // List 0: third -> second (bad).
2334 // List 1: first.
2336 if (eviction) {
2337 // Detection order: second -> first -> third.
2338 TrimForTest(false);
2339 EXPECT_EQ(3, cache_->GetEntryCount());
2340 TrimForTest(false);
2341 EXPECT_EQ(2, cache_->GetEntryCount());
2342 TrimForTest(false);
2343 EXPECT_EQ(1, cache_->GetEntryCount());
2344 } else {
2345 // Detection order: third -> second -> first.
2346 // We should detect the problem through the list, but we should not delete
2347 // the entry.
2348 void* iter = NULL;
2349 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2350 entry->Close();
2351 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2352 EXPECT_EQ(first, entry->GetKey());
2353 entry->Close();
2354 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2356 DisableIntegrityCheck();
2359 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2360 BackendInvalidEntry10(false);
2363 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2364 BackendInvalidEntry10(true);
2367 // Tests handling of corrupt entries detected by enumerations.
2368 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2369 const int kSize = 0x3000; // 12 kB.
2370 SetMaxSize(kSize * 10);
2371 SetNewEviction();
2372 InitCache();
2374 std::string first("some key");
2375 std::string second("something else");
2376 disk_cache::Entry* entry;
2377 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2378 entry->Close();
2379 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2380 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2381 entry->Close();
2382 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2383 entry->Close();
2384 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2385 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2387 // Corrupt this entry.
2388 disk_cache::EntryImpl* entry_impl =
2389 static_cast<disk_cache::EntryImpl*>(entry);
2391 entry_impl->entry()->Data()->state = 0xbad;
2392 entry_impl->entry()->Store();
2393 entry->Close();
2394 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2395 entry->Close();
2396 FlushQueueForTest();
2397 EXPECT_EQ(3, cache_->GetEntryCount());
2399 // We have:
2400 // List 0: third.
2401 // List 1: second (bad) -> first.
2403 if (eviction) {
2404 // Detection order: third -> first -> second.
2405 TrimForTest(false);
2406 EXPECT_EQ(2, cache_->GetEntryCount());
2407 TrimForTest(false);
2408 EXPECT_EQ(1, cache_->GetEntryCount());
2409 TrimForTest(false);
2410 EXPECT_EQ(1, cache_->GetEntryCount());
2411 } else {
2412 // Detection order: third -> second.
2413 // We should detect the problem through the list, but we should not delete
2414 // the entry, just fail the iteration.
2415 void* iter = NULL;
2416 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2417 entry->Close();
2418 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2420 // Now a full iteration will work, and return two entries.
2421 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2422 entry->Close();
2423 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2424 entry->Close();
2425 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2427 DisableIntegrityCheck();
2430 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2431 BackendInvalidEntry11(false);
2434 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2435 BackendInvalidEntry11(true);
2438 // Tests handling of corrupt entries in the middle of a long eviction run.
2439 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2440 const int kSize = 0x3000; // 12 kB
2441 SetMaxSize(kSize * 10);
2442 InitCache();
2444 std::string first("some key");
2445 std::string second("something else");
2446 disk_cache::Entry* entry;
2447 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2448 entry->Close();
2449 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2451 // Corrupt this entry.
2452 disk_cache::EntryImpl* entry_impl =
2453 static_cast<disk_cache::EntryImpl*>(entry);
2455 entry_impl->entry()->Data()->state = 0xbad;
2456 entry_impl->entry()->Store();
2457 entry->Close();
2458 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2459 entry->Close();
2460 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2461 TrimForTest(true);
2462 EXPECT_EQ(1, cache_->GetEntryCount());
2463 entry->Close();
2464 DisableIntegrityCheck();
2467 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2468 BackendTrimInvalidEntry12();
2471 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2472 SetNewEviction();
2473 BackendTrimInvalidEntry12();
2476 // We want to be able to deal with messed up entries on disk.
2477 void DiskCacheBackendTest::BackendInvalidRankings2() {
2478 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2479 DisableFirstCleanup();
2480 InitCache();
2482 disk_cache::Entry *entry1, *entry2;
2483 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2484 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2485 entry2->Close();
2487 // CheckCacheIntegrity will fail at this point.
2488 DisableIntegrityCheck();
2491 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2492 BackendInvalidRankings2();
2495 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2496 SetNewEviction();
2497 BackendInvalidRankings2();
2500 // If the LRU is corrupt, we delete the cache.
2501 void DiskCacheBackendTest::BackendInvalidRankings() {
2502 disk_cache::Entry* entry;
2503 void* iter = NULL;
2504 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
2505 entry->Close();
2506 EXPECT_EQ(2, cache_->GetEntryCount());
2508 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
2509 FlushQueueForTest(); // Allow the restart to finish.
2510 EXPECT_EQ(0, cache_->GetEntryCount());
2513 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2514 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2515 DisableFirstCleanup();
2516 InitCache();
2517 BackendInvalidRankings();
2520 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2521 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2522 DisableFirstCleanup();
2523 SetNewEviction();
2524 InitCache();
2525 BackendInvalidRankings();
2528 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2529 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2530 DisableFirstCleanup();
2531 InitCache();
2532 SetTestMode(); // Fail cache reinitialization.
2533 BackendInvalidRankings();
2536 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2537 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2538 DisableFirstCleanup();
2539 SetNewEviction();
2540 InitCache();
2541 SetTestMode(); // Fail cache reinitialization.
2542 BackendInvalidRankings();
2545 // If the LRU is corrupt and we have open entries, we disable the cache.
2546 void DiskCacheBackendTest::BackendDisable() {
2547 disk_cache::Entry *entry1, *entry2;
2548 void* iter = NULL;
2549 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2551 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2552 EXPECT_EQ(0, cache_->GetEntryCount());
2553 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2555 entry1->Close();
2556 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2557 FlushQueueForTest(); // This one actually allows that task to complete.
2559 EXPECT_EQ(0, cache_->GetEntryCount());
2562 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2563 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2564 DisableFirstCleanup();
2565 InitCache();
2566 BackendDisable();
2569 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2570 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2571 DisableFirstCleanup();
2572 SetNewEviction();
2573 InitCache();
2574 BackendDisable();
2577 TEST_F(DiskCacheBackendTest, DisableFailure) {
2578 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2579 DisableFirstCleanup();
2580 InitCache();
2581 SetTestMode(); // Fail cache reinitialization.
2582 BackendDisable();
2585 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2586 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2587 DisableFirstCleanup();
2588 SetNewEviction();
2589 InitCache();
2590 SetTestMode(); // Fail cache reinitialization.
2591 BackendDisable();
2594 // This is another type of corruption on the LRU; disable the cache.
2595 void DiskCacheBackendTest::BackendDisable2() {
2596 EXPECT_EQ(8, cache_->GetEntryCount());
2598 disk_cache::Entry* entry;
2599 void* iter = NULL;
2600 int count = 0;
2601 while (OpenNextEntry(&iter, &entry) == net::OK) {
2602 ASSERT_TRUE(NULL != entry);
2603 entry->Close();
2604 count++;
2605 ASSERT_LT(count, 9);
2608 FlushQueueForTest();
2609 EXPECT_EQ(0, cache_->GetEntryCount());
2612 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2613 ASSERT_TRUE(CopyTestCache("list_loop"));
2614 DisableFirstCleanup();
2615 InitCache();
2616 BackendDisable2();
2619 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2620 ASSERT_TRUE(CopyTestCache("list_loop"));
2621 DisableFirstCleanup();
2622 SetNewEviction();
2623 InitCache();
2624 BackendDisable2();
2627 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2628 ASSERT_TRUE(CopyTestCache("list_loop"));
2629 DisableFirstCleanup();
2630 InitCache();
2631 SetTestMode(); // Fail cache reinitialization.
2632 BackendDisable2();
2635 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2636 ASSERT_TRUE(CopyTestCache("list_loop"));
2637 DisableFirstCleanup();
2638 SetNewEviction();
2639 InitCache();
2640 SetTestMode(); // Fail cache reinitialization.
2641 BackendDisable2();
2644 // If the index size changes when we disable the cache, we should not crash.
2645 void DiskCacheBackendTest::BackendDisable3() {
2646 disk_cache::Entry *entry1, *entry2;
2647 void* iter = NULL;
2648 EXPECT_EQ(2, cache_->GetEntryCount());
2649 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2650 entry1->Close();
2652 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
2653 FlushQueueForTest();
2655 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2656 entry2->Close();
2658 EXPECT_EQ(1, cache_->GetEntryCount());
2661 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2662 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2663 DisableFirstCleanup();
2664 SetMaxSize(20 * 1024 * 1024);
2665 InitCache();
2666 BackendDisable3();
2669 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2670 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2671 DisableFirstCleanup();
2672 SetMaxSize(20 * 1024 * 1024);
2673 SetNewEviction();
2674 InitCache();
2675 BackendDisable3();
2678 // If we disable the cache, already open entries should work as far as possible.
2679 void DiskCacheBackendTest::BackendDisable4() {
2680 disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2681 void* iter = NULL;
2682 ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
2684 char key2[2000];
2685 char key3[20000];
2686 CacheTestFillBuffer(key2, sizeof(key2), true);
2687 CacheTestFillBuffer(key3, sizeof(key3), true);
2688 key2[sizeof(key2) - 1] = '\0';
2689 key3[sizeof(key3) - 1] = '\0';
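// Keys of roughly 2 KB and 20 KB are presumably large enough that they cannot
// be stored inline in the entry block, so the GetKey() checks below also
// exercise externally stored keys while the cache is disabled.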
2690 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2691 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2693 const int kBufSize = 20000;
2694 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2695 memset(buf->data(), 0, kBufSize);
2696 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2697 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2699 // This line should disable the cache but not delete it.
2700 EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
2701 EXPECT_EQ(0, cache_->GetEntryCount());
2703 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
2705 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2706 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2707 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2709 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2710 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2711 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2713 std::string key = entry2->GetKey();
2714 EXPECT_EQ(sizeof(key2) - 1, key.size());
2715 key = entry3->GetKey();
2716 EXPECT_EQ(sizeof(key3) - 1, key.size());
2718 entry1->Close();
2719 entry2->Close();
2720 entry3->Close();
2721 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2722 FlushQueueForTest(); // This one actually allows that task to complete.
2724 EXPECT_EQ(0, cache_->GetEntryCount());
2727 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2728 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2729 DisableFirstCleanup();
2730 InitCache();
2731 BackendDisable4();
2734 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2735 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2736 DisableFirstCleanup();
2737 SetNewEviction();
2738 InitCache();
2739 BackendDisable4();
2742 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2743 MessageLoopHelper helper;
2745 ASSERT_TRUE(CleanupCacheDir());
2746 scoped_ptr<disk_cache::BackendImpl> cache;
2747 cache.reset(new disk_cache::BackendImpl(
2748 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2749 ASSERT_TRUE(NULL != cache.get());
2750 cache->SetUnitTestMode();
2751 ASSERT_EQ(net::OK, cache->SyncInit());
2753 // Wait for a callback that never comes... about 2 secs :). The message loop
2754 // has to run to allow invocation of the usage timer.
2755 helper.WaitUntilCacheIoFinished(1);
2758 TEST_F(DiskCacheBackendTest, TimerNotCreated) {
2759 ASSERT_TRUE(CopyTestCache("wrong_version"));
2761 scoped_ptr<disk_cache::BackendImpl> cache;
2762 cache.reset(new disk_cache::BackendImpl(
2763 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2764 ASSERT_TRUE(NULL != cache.get());
2765 cache->SetUnitTestMode();
2766 ASSERT_NE(net::OK, cache->SyncInit());
2768 ASSERT_TRUE(NULL == cache->GetTimerForTest());
2770 DisableIntegrityCheck();
2773 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2774 InitCache();
2775 disk_cache::Entry* entry;
2776 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2777 entry->Close();
2778 FlushQueueForTest();
2780 disk_cache::StatsItems stats;
2781 cache_->GetStats(&stats);
2782 EXPECT_FALSE(stats.empty());
2784 disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2785 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2787 cache_.reset();
2789 // Now open the cache and verify that the stats are still there.
2790 DisableFirstCleanup();
2791 InitCache();
2792 EXPECT_EQ(1, cache_->GetEntryCount());
2794 stats.clear();
2795 cache_->GetStats(&stats);
2796 EXPECT_FALSE(stats.empty());
2798 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2801 void DiskCacheBackendTest::BackendDoomAll() {
2802 InitCache();
2804 disk_cache::Entry *entry1, *entry2;
2805 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2806 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2807 entry1->Close();
2808 entry2->Close();
2810 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2811 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2813 ASSERT_EQ(4, cache_->GetEntryCount());
2814 EXPECT_EQ(net::OK, DoomAllEntries());
2815 ASSERT_EQ(0, cache_->GetEntryCount());
2817 // We should stop posting tasks at some point (if we post any).
2818 base::MessageLoop::current()->RunUntilIdle();
2820 disk_cache::Entry *entry3, *entry4;
2821 EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2822 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2823 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2825 EXPECT_EQ(net::OK, DoomAllEntries());
2826 ASSERT_EQ(0, cache_->GetEntryCount());
2828 entry1->Close();
2829 entry2->Close();
2830 entry3->Doom(); // The entry should already be doomed, but this must work.
2831 entry3->Close();
2832 entry4->Close();
2834 // Now try with all references released.
2835 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2836 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2837 entry1->Close();
2838 entry2->Close();
2840 ASSERT_EQ(2, cache_->GetEntryCount());
2841 EXPECT_EQ(net::OK, DoomAllEntries());
2842 ASSERT_EQ(0, cache_->GetEntryCount());
2844 EXPECT_EQ(net::OK, DoomAllEntries());
2847 TEST_F(DiskCacheBackendTest, DoomAll) {
2848 BackendDoomAll();
2851 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2852 SetNewEviction();
2853 BackendDoomAll();
2856 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2857 SetMemoryOnlyMode();
2858 BackendDoomAll();
2861 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2862 SetCacheType(net::APP_CACHE);
2863 BackendDoomAll();
2866 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2867 SetCacheType(net::SHADER_CACHE);
2868 BackendDoomAll();
2871 // If the index size changes when we doom the cache, we should not crash.
2872 void DiskCacheBackendTest::BackendDoomAll2() {
2873 EXPECT_EQ(2, cache_->GetEntryCount());
2874 EXPECT_EQ(net::OK, DoomAllEntries());
2876 disk_cache::Entry* entry;
2877 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2878 entry->Close();
2880 EXPECT_EQ(1, cache_->GetEntryCount());
2883 TEST_F(DiskCacheBackendTest, DoomAll2) {
2884 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2885 DisableFirstCleanup();
2886 SetMaxSize(20 * 1024 * 1024);
2887 InitCache();
2888 BackendDoomAll2();
2891 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2892 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2893 DisableFirstCleanup();
2894 SetMaxSize(20 * 1024 * 1024);
2895 SetNewEviction();
2896 InitCache();
2897 BackendDoomAll2();
2900 // We should be able to create the same entry on multiple simultaneous instances
2901 // of the cache.
2902 TEST_F(DiskCacheTest, MultipleInstances) {
2903 base::ScopedTempDir store1, store2;
2904 ASSERT_TRUE(store1.CreateUniqueTempDir());
2905 ASSERT_TRUE(store2.CreateUniqueTempDir());
2907 base::Thread cache_thread("CacheThread");
2908 ASSERT_TRUE(cache_thread.StartWithOptions(
2909 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2910 net::TestCompletionCallback cb;
2912 const int kNumberOfCaches = 2;
2913 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2915 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
2916 net::CACHE_BACKEND_DEFAULT,
2917 store1.path(),
2919 false,
2920 cache_thread.task_runner(),
2921 NULL,
2922 &cache[0],
2923 cb.callback());
2924 ASSERT_EQ(net::OK, cb.GetResult(rv));
2925 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2926 net::CACHE_BACKEND_DEFAULT,
2927 store2.path(),
2929 false,
2930 cache_thread.task_runner(),
2931 NULL,
2932 &cache[1],
2933 cb.callback());
2934 ASSERT_EQ(net::OK, cb.GetResult(rv));
2936 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2938 std::string key("the first key");
2939 disk_cache::Entry* entry;
2940 for (int i = 0; i < kNumberOfCaches; i++) {
2941 rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2942 ASSERT_EQ(net::OK, cb.GetResult(rv));
2943 entry->Close();
2947 // Test the six regions of the curve that determines the max cache size.
2948 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2949 using disk_cache::kDefaultCacheSize;
2950 int64 large_size = kDefaultCacheSize;
2952 // Region 1: expected = available * 0.8
2953 EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
2954 disk_cache::PreferredCacheSize(large_size - 1));
2955 EXPECT_EQ(kDefaultCacheSize * 8 / 10,
2956 disk_cache::PreferredCacheSize(large_size));
2957 EXPECT_EQ(kDefaultCacheSize - 1,
2958 disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
2960 // Region 2: expected = default_size
2961 EXPECT_EQ(kDefaultCacheSize,
2962 disk_cache::PreferredCacheSize(large_size * 10 / 8));
2963 EXPECT_EQ(kDefaultCacheSize,
2964 disk_cache::PreferredCacheSize(large_size * 10 - 1));
2966 // Region 3: expected = available * 0.1
2967 EXPECT_EQ(kDefaultCacheSize,
2968 disk_cache::PreferredCacheSize(large_size * 10));
2969 EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
2970 disk_cache::PreferredCacheSize(large_size * 25 - 1));
2972 // Region 4: expected = default_size * 2.5
2973 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2974 disk_cache::PreferredCacheSize(large_size * 25));
2975 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2976 disk_cache::PreferredCacheSize(large_size * 100 - 1));
2977 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2978 disk_cache::PreferredCacheSize(large_size * 100));
2979 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2980 disk_cache::PreferredCacheSize(large_size * 250 - 1));
2982 // Region 5: expected = available * 0.01
2983 int64 largest_size = kDefaultCacheSize * 4;
2984 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2985 disk_cache::PreferredCacheSize(large_size * 250));
2986 EXPECT_EQ(largest_size - 1,
2987 disk_cache::PreferredCacheSize(largest_size * 100 - 1));
2989 // Region 6: expected = largest possible size
2990 EXPECT_EQ(largest_size,
2991 disk_cache::PreferredCacheSize(largest_size * 100));
2992 EXPECT_EQ(largest_size,
2993 disk_cache::PreferredCacheSize(largest_size * 10000));
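// A minimal sketch of the curve implied by the expectations above, with
// |default_size| = kDefaultCacheSize and |largest| = 4 * kDefaultCacheSize.
// This is an illustration derived from this test, not the authoritative
// implementation (see disk_cache::PreferredCacheSize()):
//
//   if (available < default_size * 10 / 8) return available * 8 / 10;
//   if (available < default_size * 10)     return default_size;
//   if (available < default_size * 25)     return available / 10;
//   if (available < default_size * 250)    return default_size * 5 / 2;
//   if (available < largest * 100)         return available / 100;
//   return largest;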
2996 // Tests that we can "migrate" a running instance from one experiment group to
2997 // another.
2998 TEST_F(DiskCacheBackendTest, Histograms) {
2999 InitCache();
3000 disk_cache::BackendImpl* backend_ = cache_impl_; // Needed by the macro.
3002 for (int i = 1; i < 3; i++) {
3003 CACHE_UMA(HOURS, "FillupTime", i, 28);
3007 // Make sure that we keep the total memory used by the internal buffers under
3008 // control.
3009 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
3010 InitCache();
3011 std::string key("the first key");
3012 disk_cache::Entry* entry;
3013 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3015 const int kSize = 200;
3016 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3017 CacheTestFillBuffer(buffer->data(), kSize, true);
3019 for (int i = 0; i < 10; i++) {
3020 SCOPED_TRACE(i);
3021 // Allocate 2MB for this entry.
3022 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
3023 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
3024 EXPECT_EQ(kSize,
3025 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
3026 EXPECT_EQ(kSize,
3027 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
3029 // Delete one of the buffers and truncate the other.
3030 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
3031 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
3033 // Delete the second buffer, writing 10 bytes to disk.
3034 entry->Close();
3035 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3038 entry->Close();
3039 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
3042 // This test assumes at least 150MB of system memory.
3043 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
3044 InitCache();
3046 const int kOneMB = 1024 * 1024;
3047 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3048 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3050 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3051 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3053 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3054 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
3056 cache_impl_->BufferDeleted(kOneMB);
3057 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3059 // Check the upper limit.
3060 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3062 for (int i = 0; i < 30; i++)
3063 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
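// After ~30 more MB of claimed allocations with no matching BufferDeleted()
// calls, the backend's buffer budget should be exhausted, so the next request
// is rejected.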
3065 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
3068 // Tests that sharing of external files works and we are able to delete the
3069 // files when we need to.
3070 TEST_F(DiskCacheBackendTest, FileSharing) {
3071 InitCache();
3073 disk_cache::Addr address(0x80000001);
3074 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3075 base::FilePath name = cache_impl_->GetFileName(address);
3077 scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
3078 file->Init(name);
3080 #if defined(OS_WIN)
3081 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3082 DWORD access = GENERIC_READ | GENERIC_WRITE;
3083 base::win::ScopedHandle file2(CreateFile(
3084 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3085 EXPECT_FALSE(file2.IsValid());
3087 sharing |= FILE_SHARE_DELETE;
3088 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3089 OPEN_EXISTING, 0, NULL));
3090 EXPECT_TRUE(file2.IsValid());
3091 #endif
3093 EXPECT_TRUE(base::DeleteFile(name, false));
3095 // We should be able to use the file.
3096 const int kSize = 200;
3097 char buffer1[kSize];
3098 char buffer2[kSize];
3099 memset(buffer1, 't', kSize);
3100 memset(buffer2, 0, kSize);
3101 EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3102 EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3103 EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
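// The write/read round trip above confirms that the already-open file handle
// keeps working even though the name has been deleted; the Windows-only block
// above illustrates the FILE_SHARE_DELETE sharing mode this relies on.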
3105 EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
3108 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3109 InitCache();
3111 disk_cache::Entry* entry;
3113 for (int i = 0; i < 2; ++i) {
3114 std::string key = base::StringPrintf("key%d", i);
3115 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3116 entry->Close();
3119 // Ping the oldest entry.
3120 cache_->OnExternalCacheHit("key0");
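// This should move "key0" to the most recently used position, so the trim
// below evicts "key1" instead.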
3122 TrimForTest(false);
3124 // Make sure the older key remains.
3125 EXPECT_EQ(1, cache_->GetEntryCount());
3126 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3127 entry->Close();
3130 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3131 SetCacheType(net::SHADER_CACHE);
3132 InitCache();
3134 disk_cache::Entry* entry;
3136 for (int i = 0; i < 2; ++i) {
3137 std::string key = base::StringPrintf("key%d", i);
3138 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3139 entry->Close();
3142 // Ping the oldest entry.
3143 cache_->OnExternalCacheHit("key0");
3145 TrimForTest(false);
3147 // Make sure the older key remains.
3148 EXPECT_EQ(1, cache_->GetEntryCount());
3149 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3150 entry->Close();
3153 void DiskCacheBackendTest::TracingBackendBasics() {
3154 InitCache();
3155 cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass()));
3156 cache_impl_ = NULL;
3157 EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
3158 if (!simple_cache_mode_) {
3159 EXPECT_EQ(0, cache_->GetEntryCount());
3162 net::TestCompletionCallback cb;
3163 disk_cache::Entry* entry = NULL;
3164 EXPECT_NE(net::OK, OpenEntry("key", &entry));
3165 EXPECT_TRUE(NULL == entry);
3167 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3168 EXPECT_TRUE(NULL != entry);
3170 disk_cache::Entry* same_entry = NULL;
3171 ASSERT_EQ(net::OK, OpenEntry("key", &same_entry));
3172 EXPECT_TRUE(NULL != same_entry);
3174 if (!simple_cache_mode_) {
3175 EXPECT_EQ(1, cache_->GetEntryCount());
3177 entry->Close();
3178 entry = NULL;
3179 same_entry->Close();
3180 same_entry = NULL;
3183 TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
3184 TracingBackendBasics();
3187 // The Simple Cache backend requires a few guarantees from the filesystem,
3188 // such as atomic renaming of recently opened files. Those guarantees are not
3189 // provided in general on Windows.
3190 #if defined(OS_POSIX)
3192 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3193 SetCacheType(net::APP_CACHE);
3194 SetSimpleCacheMode();
3195 BackendShutdownWithPendingCreate(false);
3198 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3199 SetCacheType(net::APP_CACHE);
3200 SetSimpleCacheMode();
3201 BackendShutdownWithPendingFileIO(false);
3204 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3205 SetSimpleCacheMode();
3206 BackendBasics();
3209 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3210 SetCacheType(net::APP_CACHE);
3211 SetSimpleCacheMode();
3212 BackendBasics();
3215 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3216 SetSimpleCacheMode();
3217 BackendKeying();
3220 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3221 SetSimpleCacheMode();
3222 SetCacheType(net::APP_CACHE);
3223 BackendKeying();
3226 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3227 SetSimpleCacheMode();
3228 BackendSetSize();
3231 // MacOS has a default open file limit of 256 files, which is incompatible with
3232 // this simple cache test.
3233 #if defined(OS_MACOSX)
3234 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3235 #else
3236 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3237 #endif
3239 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3240 SetMaxSize(0x100000);
3241 SetSimpleCacheMode();
3242 BackendLoad();
3245 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3246 SetCacheType(net::APP_CACHE);
3247 SetSimpleCacheMode();
3248 SetMaxSize(0x100000);
3249 BackendLoad();
3252 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3253 SetSimpleCacheMode();
3254 BackendDoomRecent();
3257 // crbug.com/330926, crbug.com/370677
3258 TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
3259 SetSimpleCacheMode();
3260 BackendDoomBetween();
3263 TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3264 SetSimpleCacheMode();
3265 BackendDoomAll();
3268 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3269 SetCacheType(net::APP_CACHE);
3270 SetSimpleCacheMode();
3271 BackendDoomAll();
3274 TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
3275 SetSimpleCacheMode();
3276 TracingBackendBasics();
3277 // TODO(pasko): implement integrity checking on the Simple Backend.
3278 DisableIntegrityCheck();
3281 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3282 SetSimpleCacheMode();
3283 InitCache();
3285 const char* key = "the first key";
3286 disk_cache::Entry* entry = NULL;
3288 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3289 ASSERT_TRUE(entry != NULL);
3290 entry->Close();
3291 entry = NULL;
3293 // To make sure the file creation completed, we need to call open again so
3294 // that we block until the files have actually been created.
3295 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3296 ASSERT_TRUE(entry != NULL);
3297 entry->Close();
3298 entry = NULL;
3300 // Delete one of the files in the entry.
3301 base::FilePath to_delete_file = cache_path_.AppendASCII(
3302 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3303 EXPECT_TRUE(base::PathExists(to_delete_file));
3304 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3306 // Failing to open the entry should delete the rest of these files.
3307 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3309 // Confirm the rest of the files are gone.
3310 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3311 base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3312 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3313 EXPECT_FALSE(base::PathExists(should_be_gone_file));
3317 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3318 SetSimpleCacheMode();
3319 InitCache();
3321 const char* key = "the first key";
3322 disk_cache::Entry* entry = NULL;
3324 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3325 disk_cache::Entry* null = NULL;
3326 ASSERT_NE(null, entry);
3327 entry->Close();
3328 entry = NULL;
3330 // To make sure the file creation completed, we need to call open again so
3331 // that we block until the files have actually been created.
3332 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3333 ASSERT_NE(null, entry);
3334 entry->Close();
3335 entry = NULL;
3337 // Write an invalid header for stream 0 and stream 1.
3338 base::FilePath entry_file1_path = cache_path_.AppendASCII(
3339 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3341 disk_cache::SimpleFileHeader header;
3342 header.initial_magic_number = GG_UINT64_C(0xbadf00d);
3343 EXPECT_EQ(
3344 implicit_cast<int>(sizeof(header)),
3345 base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3346 sizeof(header)));
3347 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3350 // Tests that the Simple Cache Backend fails to initialize with a non-matching
3351 // file structure on disk.
3352 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3353 // Create a cache structure with the |BackendImpl|.
3354 InitCache();
3355 disk_cache::Entry* entry;
3356 const int kSize = 50;
3357 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3358 CacheTestFillBuffer(buffer->data(), kSize, false);
3359 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3360 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3361 entry->Close();
3362 cache_.reset();
3364 // Check that the |SimpleBackendImpl| refuses to initialize over this structure.
3365 base::Thread cache_thread("CacheThread");
3366 ASSERT_TRUE(cache_thread.StartWithOptions(
3367 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3368 disk_cache::SimpleBackendImpl* simple_cache =
3369 new disk_cache::SimpleBackendImpl(
3370 cache_path_, 0, net::DISK_CACHE, cache_thread.task_runner(), NULL);
3371 net::TestCompletionCallback cb;
3372 int rv = simple_cache->Init(cb.callback());
3373 EXPECT_NE(net::OK, cb.GetResult(rv));
3374 delete simple_cache;
3375 DisableIntegrityCheck();
3378 // Tests that the |BackendImpl| refuses to initialize on top of the files
3379 // generated by the Simple Cache Backend.
3380 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3381 // Create a cache structure with the |SimpleBackendImpl|.
3382 SetSimpleCacheMode();
3383 InitCache();
3384 disk_cache::Entry* entry;
3385 const int kSize = 50;
3386 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3387 CacheTestFillBuffer(buffer->data(), kSize, false);
3388 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3389 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3390 entry->Close();
3391 cache_.reset();
3393 // Check that the |BackendImpl| refuses to initialize over this structure.
3394 base::Thread cache_thread("CacheThread");
3395 ASSERT_TRUE(cache_thread.StartWithOptions(
3396 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3397 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3398 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL);
3399 cache->SetUnitTestMode();
3400 net::TestCompletionCallback cb;
3401 int rv = cache->Init(cb.callback());
3402 EXPECT_NE(net::OK, cb.GetResult(rv));
3403 delete cache;
3404 DisableIntegrityCheck();
3407 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3408 SetSimpleCacheMode();
3409 BackendFixEnumerators();
3412 // Tests basic functionality of the SimpleBackend implementation of the
3413 // enumeration API.
3414 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3415 SetSimpleCacheMode();
3416 InitCache();
3417 std::set<std::string> key_pool;
3418 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3420 // Check that enumeration returns all entries.
3421 std::set<std::string> keys_to_match(key_pool);
3422 void* iter = NULL;
3423 size_t count = 0;
3424 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3425 cache_->EndEnumeration(&iter);
3426 EXPECT_EQ(key_pool.size(), count);
3427 EXPECT_TRUE(keys_to_match.empty());
3429 // Check that opening entries does not affect enumeration.
3430 keys_to_match = key_pool;
3431 iter = NULL;
3432 count = 0;
3433 disk_cache::Entry* entry_opened_before;
3434 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3435 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3436 &iter,
3437 &keys_to_match,
3438 &count));
3440 disk_cache::Entry* entry_opened_middle;
3441 ASSERT_EQ(net::OK,
3442 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3443 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3444 cache_->EndEnumeration(&iter);
3445 entry_opened_before->Close();
3446 entry_opened_middle->Close();
3448 EXPECT_EQ(key_pool.size(), count);
3449 EXPECT_TRUE(keys_to_match.empty());
3452 // Tests that the enumerations are not affected by dooming an entry in the
3453 // middle.
3454 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3455 SetSimpleCacheMode();
3456 InitCache();
3457 std::set<std::string> key_pool;
3458 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3460 // Check that enumeration returns all entries but the doomed one.
3461 std::set<std::string> keys_to_match(key_pool);
3462 void* iter = NULL;
3463 size_t count = 0;
3464 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3465 &iter,
3466 &keys_to_match,
3467 &count));
3469 std::string key_to_delete = *(keys_to_match.begin());
3470 DoomEntry(key_to_delete);
3471 keys_to_match.erase(key_to_delete);
3472 key_pool.erase(key_to_delete);
3473 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3474 cache_->EndEnumeration(&iter);
3476 EXPECT_EQ(key_pool.size(), count);
3477 EXPECT_TRUE(keys_to_match.empty());
3480 // Tests that enumerations are not affected by corrupt files.
3481 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3482 SetSimpleCacheMode();
3483 InitCache();
3484 std::set<std::string> key_pool;
3485 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3487 // Create a corrupt entry. The write/read sequence ensures that the entry has
3488 // actually been created on disk before we corrupt the backing files, even
3489 // when the backend performs operations optimistically.
3490 const std::string key = "the key";
3491 disk_cache::Entry* corrupted_entry;
3493 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3494 ASSERT_TRUE(corrupted_entry);
3495 const int kSize = 50;
3496 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3497 CacheTestFillBuffer(buffer->data(), kSize, false);
3498 ASSERT_EQ(kSize,
3499 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3500 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3501 corrupted_entry->Close();
3503 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3504 key, cache_path_));
3505 EXPECT_EQ(key_pool.size() + 1,
3506 implicit_cast<size_t>(cache_->GetEntryCount()));
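// The corrupt entry apparently still shows up in GetEntryCount(); the backend
// only notices the damage when the entry is actually opened.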
3508 // Check that enumeration returns all entries but the corrupt one.
3509 std::set<std::string> keys_to_match(key_pool);
3510 void* iter = NULL;
3511 size_t count = 0;
3512 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
3513 cache_->EndEnumeration(&iter);
3515 EXPECT_EQ(key_pool.size(), count);
3516 EXPECT_TRUE(keys_to_match.empty());
3519 #endif // defined(OS_POSIX)