1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/files/file_util.h"
7 #include "base/metrics/field_trial.h"
8 #include "base/port.h"
9 #include "base/run_loop.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "base/strings/stringprintf.h"
13 #include "base/test/mock_entropy_provider.h"
14 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
15 #include "base/thread_task_runner_handle.h"
16 #include "base/threading/platform_thread.h"
17 #include "base/threading/thread_restrictions.h"
18 #include "net/base/cache_type.h"
19 #include "net/base/io_buffer.h"
20 #include "net/base/net_errors.h"
21 #include "net/base/test_completion_callback.h"
22 #include "net/disk_cache/blockfile/backend_impl.h"
23 #include "net/disk_cache/blockfile/entry_impl.h"
24 #include "net/disk_cache/blockfile/experiments.h"
25 #include "net/disk_cache/blockfile/histogram_macros.h"
26 #include "net/disk_cache/blockfile/mapped_file.h"
27 #include "net/disk_cache/cache_util.h"
28 #include "net/disk_cache/disk_cache_test_base.h"
29 #include "net/disk_cache/disk_cache_test_util.h"
30 #include "net/disk_cache/memory/mem_backend_impl.h"
31 #include "net/disk_cache/simple/simple_backend_impl.h"
32 #include "net/disk_cache/simple/simple_entry_format.h"
33 #include "net/disk_cache/simple/simple_test_util.h"
34 #include "net/disk_cache/simple/simple_util.h"
35 #include "testing/gtest/include/gtest/gtest.h"
37 #if defined(OS_WIN)
38 #include "base/win/scoped_handle.h"
39 #endif
41 // Provide a BackendImpl object to macros from histogram_macros.h.
42 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_
44 using base::Time;
46 namespace {
48 const char kExistingEntryKey[] = "existing entry key";
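// Creates a blockfile backend over |cache_path| on |cache_thread| and adds a
// single entry keyed by kExistingEntryKey; returns NULL on failure. Used by
// the SimpleCacheControl* tests below.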
50 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
51 const base::Thread& cache_thread,
52 base::FilePath& cache_path) {
53 net::TestCompletionCallback cb;
55 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
56 cache_path, cache_thread.message_loop_proxy(), NULL));
57 int rv = cache->Init(cb.callback());
58 if (cb.GetResult(rv) != net::OK)
59 return scoped_ptr<disk_cache::BackendImpl>();
61 disk_cache::Entry* entry = NULL;
62 rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
63 if (cb.GetResult(rv) != net::OK)
64 return scoped_ptr<disk_cache::BackendImpl>();
65 entry->Close();
67 return cache.Pass();
70 } // namespace
72 // Tests that can run with different types of caches.
73 class DiskCacheBackendTest : public DiskCacheTestWithCache {
74 protected:
75 // Some utility methods:
77 // Perform IO operations on the cache until there is pending IO.
78 int GeneratePendingIO(net::TestCompletionCallback* cb);
80   // Adds 5 sparse entries. If not NULL, |doomed_start| and |doomed_end| will
81   // be filled with times suitable for DoomEntriesSince and DoomEntriesBetween.
82   // There are 4 entries after |doomed_start| and 2 after |doomed_end|.
83 void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
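  // Creates a set of entries with random keys and stores those keys in
  // |key_pool|; returns false on failure.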
85 bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
86 bool EnumerateAndMatchKeys(int max_to_open,
87 TestIterator* iter,
88 std::set<std::string>* keys_to_match,
89 size_t* count);
91 // Actual tests:
92 void BackendBasics();
93 void BackendKeying();
94 void BackendShutdownWithPendingFileIO(bool fast);
95 void BackendShutdownWithPendingIO(bool fast);
96 void BackendShutdownWithPendingCreate(bool fast);
97 void BackendSetSize();
98 void BackendLoad();
99 void BackendChain();
100 void BackendValidEntry();
101 void BackendInvalidEntry();
102 void BackendInvalidEntryRead();
103 void BackendInvalidEntryWithLoad();
104 void BackendTrimInvalidEntry();
105 void BackendTrimInvalidEntry2();
106 void BackendEnumerations();
107 void BackendEnumerations2();
108 void BackendInvalidEntryEnumeration();
109 void BackendFixEnumerators();
110 void BackendDoomRecent();
111 void BackendDoomBetween();
112 void BackendTransaction(const std::string& name, int num_entries, bool load);
113 void BackendRecoverInsert();
114 void BackendRecoverRemove();
115 void BackendRecoverWithEviction();
116 void BackendInvalidEntry2();
117 void BackendInvalidEntry3();
118 void BackendInvalidEntry7();
119 void BackendInvalidEntry8();
120 void BackendInvalidEntry9(bool eviction);
121 void BackendInvalidEntry10(bool eviction);
122 void BackendInvalidEntry11(bool eviction);
123 void BackendTrimInvalidEntry12();
124 void BackendDoomAll();
125 void BackendDoomAll2();
126 void BackendInvalidRankings();
127 void BackendInvalidRankings2();
128 void BackendDisable();
129 void BackendDisable2();
130 void BackendDisable3();
131 void BackendDisable4();
132 void BackendDisabledAPI();
135 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
136 if (!use_current_thread_) {
137 ADD_FAILURE();
138 return net::ERR_FAILED;
141 disk_cache::Entry* entry;
142 int rv = cache_->CreateEntry("some key", &entry, cb->callback());
143 if (cb->GetResult(rv) != net::OK)
144 return net::ERR_CACHE_CREATE_FAILURE;
146 const int kSize = 25000;
147 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
148 CacheTestFillBuffer(buffer->data(), kSize, false);
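  // Keep issuing writes at increasing offsets until one of them completes
  // asynchronously (returns net::ERR_IO_PENDING).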
150 for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
151     // We are using the current thread as the cache thread because we want to
152     // be able to call this method directly and make sure that it is the OS
153     // (rather than us switching threads) that reports the IO as pending.
154 if (!simple_cache_mode_) {
155 rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
156 0, i, buffer.get(), kSize, cb->callback(), false);
157 } else {
158 rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
161 if (rv == net::ERR_IO_PENDING)
162 break;
163 if (rv != kSize)
164 rv = net::ERR_FAILED;
167 // Don't call Close() to avoid going through the queue or we'll deadlock
168 // waiting for the operation to finish.
169 if (!simple_cache_mode_)
170 static_cast<disk_cache::EntryImpl*>(entry)->Release();
171 else
172 entry->Close();
174 return rv;
177 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
178 base::Time* doomed_end) {
179 InitCache();
181 const int kSize = 50;
182   // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
183 const int kOffset = 10 + 1024 * 1024;
185 disk_cache::Entry* entry0 = NULL;
186 disk_cache::Entry* entry1 = NULL;
187 disk_cache::Entry* entry2 = NULL;
189 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
190 CacheTestFillBuffer(buffer->data(), kSize, false);
192 ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
193 ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
194 ASSERT_EQ(kSize,
195 WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
196 entry0->Close();
198 FlushQueueForTest();
199 AddDelay();
200 if (doomed_start)
201 *doomed_start = base::Time::Now();
203 // Order in rankings list:
204 // first_part1, first_part2, second_part1, second_part2
205 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
206 ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
207 ASSERT_EQ(kSize,
208 WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
209 entry1->Close();
211 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
212 ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
213 ASSERT_EQ(kSize,
214 WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
215 entry2->Close();
217 FlushQueueForTest();
218 AddDelay();
219 if (doomed_end)
220 *doomed_end = base::Time::Now();
222 // Order in rankings list:
223 // third_part1, fourth_part1, third_part2, fourth_part2
224 disk_cache::Entry* entry3 = NULL;
225 disk_cache::Entry* entry4 = NULL;
226 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
227 ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
228 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
229 ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
230 ASSERT_EQ(kSize,
231 WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
232 ASSERT_EQ(kSize,
233 WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
234 entry3->Close();
235 entry4->Close();
237 FlushQueueForTest();
238 AddDelay();
241 // Creates entries based on random keys. Stores these keys in |key_pool|.
242 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
243 std::set<std::string>* key_pool) {
244 const int kNumEntries = 10;
246 for (int i = 0; i < kNumEntries; ++i) {
247 std::string key = GenerateKey(true);
248 disk_cache::Entry* entry;
249 if (CreateEntry(key, &entry) != net::OK)
250 return false;
251 key_pool->insert(key);
252 entry->Close();
254 return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
257 // Iterates over the backend and checks that the key of every opened entry is
258 // in |keys_to_match|, erasing each matched key. Up to |max_to_open| entries
259 // will be opened if that limit is positive; otherwise, iteration continues
260 // until OpenNextEntry stops returning net::OK.
261 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
262 int max_to_open,
263 TestIterator* iter,
264 std::set<std::string>* keys_to_match,
265 size_t* count) {
266 disk_cache::Entry* entry;
268 if (!iter)
269 return false;
270 while (iter->OpenNextEntry(&entry) == net::OK) {
271 if (!entry)
272 return false;
273 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
274 entry->Close();
275 ++(*count);
276 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
277 break;
280 return true;
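// Exercises the basic Create/Open/Doom cycle and checks GetEntryCount() at
// each step.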
283 void DiskCacheBackendTest::BackendBasics() {
284 InitCache();
285 disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
286 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
287 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
288 ASSERT_TRUE(NULL != entry1);
289 entry1->Close();
290 entry1 = NULL;
292 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
293 ASSERT_TRUE(NULL != entry1);
294 entry1->Close();
295 entry1 = NULL;
297 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
298 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
299 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
300 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
301 ASSERT_TRUE(NULL != entry1);
302 ASSERT_TRUE(NULL != entry2);
303 EXPECT_EQ(2, cache_->GetEntryCount());
305 disk_cache::Entry* entry3 = NULL;
306 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
307 ASSERT_TRUE(NULL != entry3);
308 EXPECT_TRUE(entry2 == entry3);
309 EXPECT_EQ(2, cache_->GetEntryCount());
311 EXPECT_EQ(net::OK, DoomEntry("some other key"));
312 EXPECT_EQ(1, cache_->GetEntryCount());
313 entry1->Close();
314 entry2->Close();
315 entry3->Close();
317 EXPECT_EQ(net::OK, DoomEntry("the first key"));
318 EXPECT_EQ(0, cache_->GetEntryCount());
320 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
321 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
322 entry1->Doom();
323 entry1->Close();
324 EXPECT_EQ(net::OK, DoomEntry("some other key"));
325 EXPECT_EQ(0, cache_->GetEntryCount());
326 entry2->Close();
329 TEST_F(DiskCacheBackendTest, Basics) {
330 BackendBasics();
333 TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
334 SetNewEviction();
335 BackendBasics();
338 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
339 SetMemoryOnlyMode();
340 BackendBasics();
343 TEST_F(DiskCacheBackendTest, AppCacheBasics) {
344 SetCacheType(net::APP_CACHE);
345 BackendBasics();
348 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
349 SetCacheType(net::SHADER_CACHE);
350 BackendBasics();
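// Verifies that keys are case sensitive, that the same entry is found
// regardless of where the key string sits in its buffer, and that both short
// (block file) and long (external file) keys work.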
353 void DiskCacheBackendTest::BackendKeying() {
354 InitCache();
355 const char kName1[] = "the first key";
356 const char kName2[] = "the first Key";
357 disk_cache::Entry *entry1, *entry2;
358 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
360 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
361 EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
362 entry2->Close();
364 char buffer[30];
365 base::strlcpy(buffer, kName1, arraysize(buffer));
366 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
367 EXPECT_TRUE(entry1 == entry2);
368 entry2->Close();
370 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
371 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
372 EXPECT_TRUE(entry1 == entry2);
373 entry2->Close();
375 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
376 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
377 EXPECT_TRUE(entry1 == entry2);
378 entry2->Close();
380 // Now verify long keys.
381 char buffer2[20000];
382 memset(buffer2, 's', sizeof(buffer2));
383 buffer2[1023] = '\0';
384 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
385 entry2->Close();
387 buffer2[1023] = 'g';
388 buffer2[19999] = '\0';
389 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
390 entry2->Close();
391 entry1->Close();
394 TEST_F(DiskCacheBackendTest, Keying) {
395 BackendKeying();
398 TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
399 SetNewEviction();
400 BackendKeying();
403 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
404 SetMemoryOnlyMode();
405 BackendKeying();
408 TEST_F(DiskCacheBackendTest, AppCacheKeying) {
409 SetCacheType(net::APP_CACHE);
410 BackendKeying();
413 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
414 SetCacheType(net::SHADER_CACHE);
415 BackendKeying();
418 TEST_F(DiskCacheTest, CreateBackend) {
419 net::TestCompletionCallback cb;
422 ASSERT_TRUE(CleanupCacheDir());
423 base::Thread cache_thread("CacheThread");
424 ASSERT_TRUE(cache_thread.StartWithOptions(
425 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
427 // Test the private factory method(s).
428 scoped_ptr<disk_cache::Backend> cache;
429 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
430 ASSERT_TRUE(cache.get());
431 cache.reset();
433 // Now test the public API.
434 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
435 net::CACHE_BACKEND_DEFAULT,
436 cache_path_,
438 false,
439 cache_thread.task_runner(),
440 NULL,
441 &cache,
442 cb.callback());
443 ASSERT_EQ(net::OK, cb.GetResult(rv));
444 ASSERT_TRUE(cache.get());
445 cache.reset();
447 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
448 net::CACHE_BACKEND_DEFAULT,
449 base::FilePath(), 0,
450 false, NULL, NULL, &cache,
451 cb.callback());
452 ASSERT_EQ(net::OK, cb.GetResult(rv));
453 ASSERT_TRUE(cache.get());
454 cache.reset();
457 base::MessageLoop::current()->RunUntilIdle();
460 // Tests that |BackendImpl| fails to initialize with a missing file.
461 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
462 ASSERT_TRUE(CopyTestCache("bad_entry"));
463 base::FilePath filename = cache_path_.AppendASCII("data_1");
464 base::DeleteFile(filename, false);
465 base::Thread cache_thread("CacheThread");
466 ASSERT_TRUE(cache_thread.StartWithOptions(
467 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
468 net::TestCompletionCallback cb;
470 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
471 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
472 cache_path_, cache_thread.task_runner(), NULL));
473 int rv = cache->Init(cb.callback());
474 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
475 base::ThreadRestrictions::SetIOAllowed(prev);
477 cache.reset();
478 DisableIntegrityCheck();
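// Checks that a stray file in the cache directory that happens to use the
// external-file naming scheme (f_000001) is left untouched by cache writes.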
481 TEST_F(DiskCacheBackendTest, ExternalFiles) {
482 InitCache();
483 // First, let's create a file on the folder.
484 base::FilePath filename = cache_path_.AppendASCII("f_000001");
486 const int kSize = 50;
487 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
488 CacheTestFillBuffer(buffer1->data(), kSize, false);
489 ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize));
491 // Now let's create a file with the cache.
492 disk_cache::Entry* entry;
493 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
494 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
495 entry->Close();
497 // And verify that the first file is still there.
498 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
499 ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
500 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
503 // Tests that we deal with file-level pending operations at destruction time.
504 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
505 ASSERT_TRUE(CleanupCacheDir());
506 uint32 flags = disk_cache::kNoBuffering;
507 if (!fast)
508 flags |= disk_cache::kNoRandom;
510 UseCurrentThread();
511 CreateBackend(flags, NULL);
513 net::TestCompletionCallback cb;
514 int rv = GeneratePendingIO(&cb);
516 // The cache destructor will see one pending operation here.
517 cache_.reset();
519 if (rv == net::ERR_IO_PENDING) {
520 if (fast || simple_cache_mode_)
521 EXPECT_FALSE(cb.have_result());
522 else
523 EXPECT_TRUE(cb.have_result());
526 base::MessageLoop::current()->RunUntilIdle();
528 #if !defined(OS_IOS)
529   // Wait for the actual operation to complete, or we'll keep a file handle that
530   // may cause issues later. Note that on iOS, even though this test uses a
531   // single thread, the actual IO is posted to a worker thread and the cache
532   // destructor breaks the link needed to reach |cb| when the operation completes.
533 rv = cb.GetResult(rv);
534 #endif
537 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
538 BackendShutdownWithPendingFileIO(false);
541 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
542 // builds because they contain a lot of intentional memory leaks.
543 // The wrapper scripts used to run tests under Valgrind Memcheck will also
544 // disable these tests. See:
545 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
546 #if !defined(LEAK_SANITIZER)
547 // We'll be leaking from this test.
548 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
549 // The integrity test sets kNoRandom so there's a version mismatch if we don't
550 // force new eviction.
551 SetNewEviction();
552 BackendShutdownWithPendingFileIO(true);
554 #endif
556 // See crbug.com/330074
557 #if !defined(OS_IOS)
558 // Tests that one cache instance is not affected by another one going away.
559 TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
560 base::ScopedTempDir store;
561 ASSERT_TRUE(store.CreateUniqueTempDir());
563 net::TestCompletionCallback cb;
564 scoped_ptr<disk_cache::Backend> extra_cache;
565 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
566 net::CACHE_BACKEND_DEFAULT,
567 store.path(),
569 false,
570 base::ThreadTaskRunnerHandle::Get(),
571 NULL,
572 &extra_cache,
573 cb.callback());
574 ASSERT_EQ(net::OK, cb.GetResult(rv));
575 ASSERT_TRUE(extra_cache.get() != NULL);
577 ASSERT_TRUE(CleanupCacheDir());
578 SetNewEviction(); // Match the expected behavior for integrity verification.
579 UseCurrentThread();
581 CreateBackend(disk_cache::kNoBuffering, NULL);
582 rv = GeneratePendingIO(&cb);
584 // cache_ has a pending operation, and extra_cache will go away.
585 extra_cache.reset();
587 if (rv == net::ERR_IO_PENDING)
588 EXPECT_FALSE(cb.have_result());
590 base::MessageLoop::current()->RunUntilIdle();
592 // Wait for the actual operation to complete, or we'll keep a file handle that
593 // may cause issues later.
594 rv = cb.GetResult(rv);
596 #endif
598 // Tests that we deal with background-thread pending operations.
599 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
600 net::TestCompletionCallback cb;
603 ASSERT_TRUE(CleanupCacheDir());
604 base::Thread cache_thread("CacheThread");
605 ASSERT_TRUE(cache_thread.StartWithOptions(
606 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
608 uint32 flags = disk_cache::kNoBuffering;
609 if (!fast)
610 flags |= disk_cache::kNoRandom;
612 CreateBackend(flags, &cache_thread);
614 disk_cache::Entry* entry;
615 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
616 ASSERT_EQ(net::OK, cb.GetResult(rv));
618 entry->Close();
620 // The cache destructor will see one pending operation here.
621 cache_.reset();
624 base::MessageLoop::current()->RunUntilIdle();
627 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
628 BackendShutdownWithPendingIO(false);
631 #if !defined(LEAK_SANITIZER)
632 // We'll be leaking from this test.
633 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
634 // The integrity test sets kNoRandom so there's a version mismatch if we don't
635 // force new eviction.
636 SetNewEviction();
637 BackendShutdownWithPendingIO(true);
639 #endif
641 // Tests that we deal with create-type pending operations.
642 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
643 net::TestCompletionCallback cb;
646 ASSERT_TRUE(CleanupCacheDir());
647 base::Thread cache_thread("CacheThread");
648 ASSERT_TRUE(cache_thread.StartWithOptions(
649 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
651 disk_cache::BackendFlags flags =
652 fast ? disk_cache::kNone : disk_cache::kNoRandom;
653 CreateBackend(flags, &cache_thread);
655 disk_cache::Entry* entry;
656 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
657 ASSERT_EQ(net::ERR_IO_PENDING, rv);
659 cache_.reset();
660 EXPECT_FALSE(cb.have_result());
663 base::MessageLoop::current()->RunUntilIdle();
666 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
667 BackendShutdownWithPendingCreate(false);
670 #if !defined(LEAK_SANITIZER)
671 // We'll be leaking an entry from this test.
672 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
673 // The integrity test sets kNoRandom so there's a version mismatch if we don't
674 // force new eviction.
675 SetNewEviction();
676 BackendShutdownWithPendingCreate(true);
678 #endif
680 // Disabled on Android since this test requires the cache creator to create
681 // blockfile caches.
682 #if !defined(OS_ANDROID)
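// Writes a bogus, truncated index file and expects backend creation to fail
// cleanly rather than crash.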
683 TEST_F(DiskCacheTest, TruncatedIndex) {
684 ASSERT_TRUE(CleanupCacheDir());
685 base::FilePath index = cache_path_.AppendASCII("index");
686 ASSERT_EQ(5, base::WriteFile(index, "hello", 5));
688 base::Thread cache_thread("CacheThread");
689 ASSERT_TRUE(cache_thread.StartWithOptions(
690 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
691 net::TestCompletionCallback cb;
693 scoped_ptr<disk_cache::Backend> backend;
694 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
695 net::CACHE_BACKEND_BLOCKFILE,
696 cache_path_,
698 false,
699 cache_thread.task_runner(),
700 NULL,
701 &backend,
702 cb.callback());
703 ASSERT_NE(net::OK, cb.GetResult(rv));
705 ASSERT_FALSE(backend);
707 #endif
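// Verifies that the cache honors max-size changes: writes above the limit
// fail, and shrinking the limit triggers eviction of the oldest entry.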
709 void DiskCacheBackendTest::BackendSetSize() {
710 const int cache_size = 0x10000; // 64 kB
711 SetMaxSize(cache_size);
712 InitCache();
714 std::string first("some key");
715 std::string second("something else");
716 disk_cache::Entry* entry;
717 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
719 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
720 memset(buffer->data(), 0, cache_size);
721 EXPECT_EQ(cache_size / 10,
722 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
723 << "normal file";
725 EXPECT_EQ(net::ERR_FAILED,
726 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
727 << "file size above the limit";
729 // By doubling the total size, we make this file cacheable.
730 SetMaxSize(cache_size * 2);
731 EXPECT_EQ(cache_size / 5,
732 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
734   // Let's fill up the cache!
735 SetMaxSize(cache_size * 10);
736 EXPECT_EQ(cache_size * 3 / 4,
737 WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
738 entry->Close();
739 FlushQueueForTest();
741 SetMaxSize(cache_size);
743 // The cache is 95% full.
745 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
746 EXPECT_EQ(cache_size / 10,
747 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
749 disk_cache::Entry* entry2;
750 ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
751 EXPECT_EQ(cache_size / 10,
752 WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
753 entry2->Close(); // This will trigger the cache trim.
755 EXPECT_NE(net::OK, OpenEntry(first, &entry2));
757 FlushQueueForTest(); // Make sure that we are done trimming the cache.
758 FlushQueueForTest(); // We may have posted two tasks to evict stuff.
760 entry->Close();
761 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
762 EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
763 entry->Close();
766 TEST_F(DiskCacheBackendTest, SetSize) {
767 BackendSetSize();
770 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
771 SetNewEviction();
772 BackendSetSize();
775 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
776 SetMemoryOnlyMode();
777 BackendSetSize();
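// Stress test: creates 100 entries, reopens them in random order, dooms each
// one, and expects the cache to end up empty.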
780 void DiskCacheBackendTest::BackendLoad() {
781 InitCache();
782 int seed = static_cast<int>(Time::Now().ToInternalValue());
783 srand(seed);
785 disk_cache::Entry* entries[100];
786 for (int i = 0; i < 100; i++) {
787 std::string key = GenerateKey(true);
788 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
790 EXPECT_EQ(100, cache_->GetEntryCount());
792 for (int i = 0; i < 100; i++) {
793 int source1 = rand() % 100;
794 int source2 = rand() % 100;
795 disk_cache::Entry* temp = entries[source1];
796 entries[source1] = entries[source2];
797 entries[source2] = temp;
800 for (int i = 0; i < 100; i++) {
801 disk_cache::Entry* entry;
802 ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
803 EXPECT_TRUE(entry == entries[i]);
804 entry->Close();
805 entries[i]->Doom();
806 entries[i]->Close();
808 FlushQueueForTest();
809 EXPECT_EQ(0, cache_->GetEntryCount());
812 TEST_F(DiskCacheBackendTest, Load) {
813 // Work with a tiny index table (16 entries)
814 SetMask(0xf);
815 SetMaxSize(0x100000);
816 BackendLoad();
819 TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
820 SetNewEviction();
821 // Work with a tiny index table (16 entries)
822 SetMask(0xf);
823 SetMaxSize(0x100000);
824 BackendLoad();
827 TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
828 SetMaxSize(0x100000);
829 SetMemoryOnlyMode();
830 BackendLoad();
833 TEST_F(DiskCacheBackendTest, AppCacheLoad) {
834 SetCacheType(net::APP_CACHE);
835 // Work with a tiny index table (16 entries)
836 SetMask(0xf);
837 SetMaxSize(0x100000);
838 BackendLoad();
841 TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
842 SetCacheType(net::SHADER_CACHE);
843 // Work with a tiny index table (16 entries)
844 SetMask(0xf);
845 SetMaxSize(0x100000);
846 BackendLoad();
849 // Tests the chaining of an entry to the current head.
850 void DiskCacheBackendTest::BackendChain() {
851 SetMask(0x1); // 2-entry table.
852 SetMaxSize(0x3000); // 12 kB.
853 InitCache();
855 disk_cache::Entry* entry;
856 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
857 entry->Close();
858 ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
859 entry->Close();
862 TEST_F(DiskCacheBackendTest, Chain) {
863 BackendChain();
866 TEST_F(DiskCacheBackendTest, NewEvictionChain) {
867 SetNewEviction();
868 BackendChain();
871 TEST_F(DiskCacheBackendTest, AppCacheChain) {
872 SetCacheType(net::APP_CACHE);
873 BackendChain();
876 TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
877 SetCacheType(net::SHADER_CACHE);
878 BackendChain();
881 TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
882 SetNewEviction();
883 InitCache();
885 disk_cache::Entry* entry;
886 for (int i = 0; i < 100; i++) {
887 std::string name(base::StringPrintf("Key %d", i));
888 ASSERT_EQ(net::OK, CreateEntry(name, &entry));
889 entry->Close();
890 if (i < 90) {
891 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
892 ASSERT_EQ(net::OK, OpenEntry(name, &entry));
893 entry->Close();
897 // The first eviction must come from list 1 (10% limit), the second must come
898 // from list 0.
899 TrimForTest(false);
900 EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
901 TrimForTest(false);
902 EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
904 // Double check that we still have the list tails.
905 ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
906 entry->Close();
907 ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
908 entry->Close();
911 // Before looking for invalid entries, let's check a valid entry.
912 void DiskCacheBackendTest::BackendValidEntry() {
913 InitCache();
915 std::string key("Some key");
916 disk_cache::Entry* entry;
917 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
919 const int kSize = 50;
920 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
921 memset(buffer1->data(), 0, kSize);
922 base::strlcpy(buffer1->data(), "And the data to save", kSize);
923 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
924 entry->Close();
925 SimulateCrash();
927 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
929 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
930 memset(buffer2->data(), 0, kSize);
931 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
932 entry->Close();
933 EXPECT_STREQ(buffer1->data(), buffer2->data());
936 TEST_F(DiskCacheBackendTest, ValidEntry) {
937 BackendValidEntry();
940 TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
941 SetNewEviction();
942 BackendValidEntry();
945 // The same logic as the previous test (ValidEntry), but this time we force
946 // the entry to be invalid, simulating a crash in the middle.
947 // We'll be leaking memory from this test.
948 void DiskCacheBackendTest::BackendInvalidEntry() {
949 InitCache();
951 std::string key("Some key");
952 disk_cache::Entry* entry;
953 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
955 const int kSize = 50;
956 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
957 memset(buffer->data(), 0, kSize);
958 base::strlcpy(buffer->data(), "And the data to save", kSize);
959 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
960 SimulateCrash();
962 EXPECT_NE(net::OK, OpenEntry(key, &entry));
963 EXPECT_EQ(0, cache_->GetEntryCount());
966 #if !defined(LEAK_SANITIZER)
967 // We'll be leaking memory from this test.
968 TEST_F(DiskCacheBackendTest, InvalidEntry) {
969 BackendInvalidEntry();
972 // We'll be leaking memory from this test.
973 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
974 SetNewEviction();
975 BackendInvalidEntry();
978 // We'll be leaking memory from this test.
979 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
980 SetCacheType(net::APP_CACHE);
981 BackendInvalidEntry();
984 // We'll be leaking memory from this test.
985 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
986 SetCacheType(net::SHADER_CACHE);
987 BackendInvalidEntry();
990 // Almost the same test, but this time crash the cache after reading an entry.
991 // We'll be leaking memory from this test.
992 void DiskCacheBackendTest::BackendInvalidEntryRead() {
993 InitCache();
995 std::string key("Some key");
996 disk_cache::Entry* entry;
997 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
999 const int kSize = 50;
1000 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1001 memset(buffer->data(), 0, kSize);
1002 base::strlcpy(buffer->data(), "And the data to save", kSize);
1003 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1004 entry->Close();
1005 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1006 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
1008 SimulateCrash();
1010 if (type_ == net::APP_CACHE) {
1011 // Reading an entry and crashing should not make it dirty.
1012 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1013 EXPECT_EQ(1, cache_->GetEntryCount());
1014 entry->Close();
1015 } else {
1016 EXPECT_NE(net::OK, OpenEntry(key, &entry));
1017 EXPECT_EQ(0, cache_->GetEntryCount());
1021 // We'll be leaking memory from this test.
1022 TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
1023 BackendInvalidEntryRead();
1026 // We'll be leaking memory from this test.
1027 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
1028 SetNewEviction();
1029 BackendInvalidEntryRead();
1032 // We'll be leaking memory from this test.
1033 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
1034 SetCacheType(net::APP_CACHE);
1035 BackendInvalidEntryRead();
1038 // We'll be leaking memory from this test.
1039 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
1040 SetCacheType(net::SHADER_CACHE);
1041 BackendInvalidEntryRead();
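// Creates 100 entries, closes only half of them, and simulates a crash; the
// entries that were still open (dirty) should be discarded on restart.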
1044 // We'll be leaking memory from this test.
1045 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
1046 // Work with a tiny index table (16 entries)
1047 SetMask(0xf);
1048 SetMaxSize(0x100000);
1049 InitCache();
1051 int seed = static_cast<int>(Time::Now().ToInternalValue());
1052 srand(seed);
1054 const int kNumEntries = 100;
1055 disk_cache::Entry* entries[kNumEntries];
1056 for (int i = 0; i < kNumEntries; i++) {
1057 std::string key = GenerateKey(true);
1058 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
1060 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1062 for (int i = 0; i < kNumEntries; i++) {
1063 int source1 = rand() % kNumEntries;
1064 int source2 = rand() % kNumEntries;
1065 disk_cache::Entry* temp = entries[source1];
1066 entries[source1] = entries[source2];
1067 entries[source2] = temp;
1070 std::string keys[kNumEntries];
1071 for (int i = 0; i < kNumEntries; i++) {
1072 keys[i] = entries[i]->GetKey();
1073 if (i < kNumEntries / 2)
1074 entries[i]->Close();
1077 SimulateCrash();
1079 for (int i = kNumEntries / 2; i < kNumEntries; i++) {
1080 disk_cache::Entry* entry;
1081 EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
1084 for (int i = 0; i < kNumEntries / 2; i++) {
1085 disk_cache::Entry* entry;
1086 ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
1087 entry->Close();
1090 EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
1093 // We'll be leaking memory from this test.
1094 TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
1095 BackendInvalidEntryWithLoad();
1098 // We'll be leaking memory from this test.
1099 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
1100 SetNewEviction();
1101 BackendInvalidEntryWithLoad();
1104 // We'll be leaking memory from this test.
1105 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
1106 SetCacheType(net::APP_CACHE);
1107 BackendInvalidEntryWithLoad();
1110 // We'll be leaking memory from this test.
1111 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
1112 SetCacheType(net::SHADER_CACHE);
1113 BackendInvalidEntryWithLoad();
1116 // We'll be leaking memory from this test.
1117 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1118 const int kSize = 0x3000; // 12 kB
1119 SetMaxSize(kSize * 10);
1120 InitCache();
1122 std::string first("some key");
1123 std::string second("something else");
1124 disk_cache::Entry* entry;
1125 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
1127 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1128 memset(buffer->data(), 0, kSize);
1129 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1131 // Simulate a crash.
1132 SimulateCrash();
1134 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
1135 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1137 EXPECT_EQ(2, cache_->GetEntryCount());
1138 SetMaxSize(kSize);
1139 entry->Close(); // Trim the cache.
1140 FlushQueueForTest();
1142   // If we evicted the entry in less than 20 ms, we have one entry in the cache;
1143   // if it took longer than that, we posted a task and we'll delete the second
1144   // entry too.
1145 base::MessageLoop::current()->RunUntilIdle();
1147   // This may not be thread-safe in general, but for now it's OK, so add some
1148   // ThreadSanitizer annotations to ignore data races on cache_.
1149   // See http://crbug.com/55970
1150 ANNOTATE_IGNORE_READS_BEGIN();
1151 EXPECT_GE(1, cache_->GetEntryCount());
1152 ANNOTATE_IGNORE_READS_END();
1154 EXPECT_NE(net::OK, OpenEntry(first, &entry));
1157 // We'll be leaking memory from this test.
1158 TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
1159 BackendTrimInvalidEntry();
1162 // We'll be leaking memory from this test.
1163 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
1164 SetNewEviction();
1165 BackendTrimInvalidEntry();
1168 // We'll be leaking memory from this test.
1169 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1170 SetMask(0xf); // 16-entry table.
1172 const int kSize = 0x3000; // 12 kB
1173 SetMaxSize(kSize * 40);
1174 InitCache();
1176 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1177 memset(buffer->data(), 0, kSize);
1178 disk_cache::Entry* entry;
1180 // Writing 32 entries to this cache chains most of them.
1181 for (int i = 0; i < 32; i++) {
1182 std::string key(base::StringPrintf("some key %d", i));
1183 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1184 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1185 entry->Close();
1186 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1187 // Note that we are not closing the entries.
1190 // Simulate a crash.
1191 SimulateCrash();
1193 ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
1194 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1196 FlushQueueForTest();
1197 EXPECT_EQ(33, cache_->GetEntryCount());
1198 SetMaxSize(kSize);
1200   // For the new eviction code, all corrupt entries are on the second list so
1201   // they are not going away that easily.
1202 if (new_eviction_) {
1203 EXPECT_EQ(net::OK, DoomAllEntries());
1206 entry->Close(); // Trim the cache.
1207 FlushQueueForTest();
1209 // We may abort the eviction before cleaning up everything.
1210 base::MessageLoop::current()->RunUntilIdle();
1211 FlushQueueForTest();
1212   // To be clear: we may still have eviction tasks running at this time, so the
1213   // number of entries can change while we read it.
1214 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1215 EXPECT_GE(30, cache_->GetEntryCount());
1216 ANNOTATE_IGNORE_READS_AND_WRITES_END();
1219 // We'll be leaking memory from this test.
1220 TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
1221 BackendTrimInvalidEntry2();
1224 // We'll be leaking memory from this test.
1225 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
1226 SetNewEviction();
1227 BackendTrimInvalidEntry2();
1229 #endif // !defined(LEAK_SANITIZER)
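// Creates 100 entries and verifies that enumeration visits each of them
// exactly once, and that enumerating does not alter the last-used or
// last-modified timestamps.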
1231 void DiskCacheBackendTest::BackendEnumerations() {
1232 InitCache();
1233 Time initial = Time::Now();
1235 const int kNumEntries = 100;
1236 for (int i = 0; i < kNumEntries; i++) {
1237 std::string key = GenerateKey(true);
1238 disk_cache::Entry* entry;
1239 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1240 entry->Close();
1242 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1243 Time final = Time::Now();
1245 disk_cache::Entry* entry;
1246 scoped_ptr<TestIterator> iter = CreateIterator();
1247 int count = 0;
1248 Time last_modified[kNumEntries];
1249 Time last_used[kNumEntries];
1250 while (iter->OpenNextEntry(&entry) == net::OK) {
1251 ASSERT_TRUE(NULL != entry);
1252 if (count < kNumEntries) {
1253 last_modified[count] = entry->GetLastModified();
1254 last_used[count] = entry->GetLastUsed();
1255 EXPECT_TRUE(initial <= last_modified[count]);
1256 EXPECT_TRUE(final >= last_modified[count]);
1259 entry->Close();
1260 count++;
1262 EXPECT_EQ(kNumEntries, count);
1264 iter = CreateIterator();
1265 count = 0;
1266 // The previous enumeration should not have changed the timestamps.
1267 while (iter->OpenNextEntry(&entry) == net::OK) {
1268 ASSERT_TRUE(NULL != entry);
1269 if (count < kNumEntries) {
1270 EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1271 EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1273 entry->Close();
1274 count++;
1276 EXPECT_EQ(kNumEntries, count);
1279 TEST_F(DiskCacheBackendTest, Enumerations) {
1280 BackendEnumerations();
1283 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
1284 SetNewEviction();
1285 BackendEnumerations();
1288 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
1289 SetMemoryOnlyMode();
1290 BackendEnumerations();
1293 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
1294 SetCacheType(net::SHADER_CACHE);
1295 BackendEnumerations();
1298 TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
1299 SetCacheType(net::APP_CACHE);
1300 BackendEnumerations();
1303 // Verifies enumerations while entries are open.
1304 void DiskCacheBackendTest::BackendEnumerations2() {
1305 InitCache();
1306 const std::string first("first");
1307 const std::string second("second");
1308 disk_cache::Entry *entry1, *entry2;
1309 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1310 entry1->Close();
1311 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1312 entry2->Close();
1313 FlushQueueForTest();
1315 // Make sure that the timestamp is not the same.
1316 AddDelay();
1317 ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
1318 scoped_ptr<TestIterator> iter = CreateIterator();
1319 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1320 EXPECT_EQ(entry2->GetKey(), second);
1322   // Two entries remain, and the iterator is pointing at "first".
1323 entry1->Close();
1324 entry2->Close();
1326 // The iterator should still be valid, so we should not crash.
1327 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1328 EXPECT_EQ(entry2->GetKey(), first);
1329 entry2->Close();
1330 iter = CreateIterator();
1332 // Modify the oldest entry and get the newest element.
1333 ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
1334 EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
1335 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1336 if (type_ == net::APP_CACHE) {
1337 // The list is not updated.
1338 EXPECT_EQ(entry2->GetKey(), second);
1339 } else {
1340 EXPECT_EQ(entry2->GetKey(), first);
1343 entry1->Close();
1344 entry2->Close();
1347 TEST_F(DiskCacheBackendTest, Enumerations2) {
1348 BackendEnumerations2();
1351 TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
1352 SetNewEviction();
1353 BackendEnumerations2();
1356 TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
1357 SetMemoryOnlyMode();
1358 BackendEnumerations2();
1361 TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
1362 SetCacheType(net::APP_CACHE);
1363 BackendEnumerations2();
1366 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
1367 SetCacheType(net::SHADER_CACHE);
1368 BackendEnumerations2();
1371 // Verify that ReadData calls do not update the LRU ranking
1372 // when using the SHADER_CACHE type.
1373 TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
1374 SetCacheType(net::SHADER_CACHE);
1375 InitCache();
1376 const std::string first("first");
1377 const std::string second("second");
1378 disk_cache::Entry *entry1, *entry2;
1379 const int kSize = 50;
1380 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1382 ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1383 memset(buffer1->data(), 0, kSize);
1384 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1385 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1387 ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1388 entry2->Close();
1390 FlushQueueForTest();
1392 // Make sure that the timestamp is not the same.
1393 AddDelay();
1395 // Read from the last item in the LRU.
1396 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1397 entry1->Close();
1399 scoped_ptr<TestIterator> iter = CreateIterator();
1400 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1401 EXPECT_EQ(entry2->GetKey(), second);
1402 entry2->Close();
1405 #if !defined(LEAK_SANITIZER)
1406 // Verify handling of invalid entries while doing enumerations.
1407 // We'll be leaking memory from this test.
1408 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1409 InitCache();
1411 std::string key("Some key");
1412 disk_cache::Entry *entry, *entry1, *entry2;
1413 ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
1415 const int kSize = 50;
1416 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1417 memset(buffer1->data(), 0, kSize);
1418 base::strlcpy(buffer1->data(), "And the data to save", kSize);
1419 EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1420 entry1->Close();
1421 ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
1422 EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1424 std::string key2("Another key");
1425 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
1426 entry2->Close();
1427 ASSERT_EQ(2, cache_->GetEntryCount());
1429 SimulateCrash();
1431 scoped_ptr<TestIterator> iter = CreateIterator();
1432 int count = 0;
1433 while (iter->OpenNextEntry(&entry) == net::OK) {
1434 ASSERT_TRUE(NULL != entry);
1435 EXPECT_EQ(key2, entry->GetKey());
1436 entry->Close();
1437 count++;
1439 EXPECT_EQ(1, count);
1440 EXPECT_EQ(1, cache_->GetEntryCount());
1443 // We'll be leaking memory from this test.
1444 TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
1445 BackendInvalidEntryEnumeration();
1448 // We'll be leaking memory from this test.
1449 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
1450 SetNewEviction();
1451 BackendInvalidEntryEnumeration();
1453 #endif // !defined(LEAK_SANITIZER)
1455 // Tests that if for some reason entries are modified close to existing cache
1456 // iterators, we don't generate fatal errors or reset the cache.
1457 void DiskCacheBackendTest::BackendFixEnumerators() {
1458 InitCache();
1460 int seed = static_cast<int>(Time::Now().ToInternalValue());
1461 srand(seed);
1463 const int kNumEntries = 10;
1464 for (int i = 0; i < kNumEntries; i++) {
1465 std::string key = GenerateKey(true);
1466 disk_cache::Entry* entry;
1467 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1468 entry->Close();
1470 EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1472 disk_cache::Entry *entry1, *entry2;
1473 scoped_ptr<TestIterator> iter1 = CreateIterator(), iter2 = CreateIterator();
1474 ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
1475 ASSERT_TRUE(NULL != entry1);
1476 entry1->Close();
1477 entry1 = NULL;
1479 // Let's go to the middle of the list.
1480 for (int i = 0; i < kNumEntries / 2; i++) {
1481 if (entry1)
1482 entry1->Close();
1483 ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
1484 ASSERT_TRUE(NULL != entry1);
1486 ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
1487 ASSERT_TRUE(NULL != entry2);
1488 entry2->Close();
1491   // Messing with entry1 will modify entry2->next.
1492 entry1->Doom();
1493 ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
1494 ASSERT_TRUE(NULL != entry2);
1496 // The link entry2->entry1 should be broken.
1497 EXPECT_NE(entry2->GetKey(), entry1->GetKey());
1498 entry1->Close();
1499 entry2->Close();
1501 // And the second iterator should keep working.
1502 ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
1503 ASSERT_TRUE(NULL != entry2);
1504 entry2->Close();
1507 TEST_F(DiskCacheBackendTest, FixEnumerators) {
1508 BackendFixEnumerators();
1511 TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
1512 SetNewEviction();
1513 BackendFixEnumerators();
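// Creates two entries, waits, creates two more, and then checks that
// DoomEntriesSince() only removes entries created after the given time.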
1516 void DiskCacheBackendTest::BackendDoomRecent() {
1517 InitCache();
1519 disk_cache::Entry *entry;
1520 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1521 entry->Close();
1522 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1523 entry->Close();
1524 FlushQueueForTest();
1526 AddDelay();
1527 Time middle = Time::Now();
1529 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1530 entry->Close();
1531 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1532 entry->Close();
1533 FlushQueueForTest();
1535 AddDelay();
1536 Time final = Time::Now();
1538 ASSERT_EQ(4, cache_->GetEntryCount());
1539 EXPECT_EQ(net::OK, DoomEntriesSince(final));
1540 ASSERT_EQ(4, cache_->GetEntryCount());
1542 EXPECT_EQ(net::OK, DoomEntriesSince(middle));
1543 ASSERT_EQ(2, cache_->GetEntryCount());
1545 ASSERT_EQ(net::OK, OpenEntry("second", &entry));
1546 entry->Close();
1549 TEST_F(DiskCacheBackendTest, DoomRecent) {
1550 BackendDoomRecent();
1553 TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
1554 SetNewEviction();
1555 BackendDoomRecent();
1558 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
1559 SetMemoryOnlyMode();
1560 BackendDoomRecent();
1563 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
1564 SetMemoryOnlyMode();
1565 base::Time start;
1566 InitSparseCache(&start, NULL);
1567 DoomEntriesSince(start);
1568 EXPECT_EQ(1, cache_->GetEntryCount());
1571 TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
1572 base::Time start;
1573 InitSparseCache(&start, NULL);
1574 DoomEntriesSince(start);
1575   // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
1576   // MemBackendImpl does not. That's why the expected value here differs from
1577   // MemoryOnlyDoomEntriesSinceSparse.
1578 EXPECT_EQ(3, cache_->GetEntryCount());
1581 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
1582 SetMemoryOnlyMode();
1583 InitSparseCache(NULL, NULL);
1584 EXPECT_EQ(net::OK, DoomAllEntries());
1585 EXPECT_EQ(0, cache_->GetEntryCount());
1588 TEST_F(DiskCacheBackendTest, DoomAllSparse) {
1589 InitSparseCache(NULL, NULL);
1590 EXPECT_EQ(net::OK, DoomAllEntries());
1591 EXPECT_EQ(0, cache_->GetEntryCount());
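// Creates entries before, inside, and after a time window and verifies that
// DoomEntriesBetween() only removes the entries inside the window.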
1594 void DiskCacheBackendTest::BackendDoomBetween() {
1595 InitCache();
1597 disk_cache::Entry *entry;
1598 ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1599 entry->Close();
1600 FlushQueueForTest();
1602 AddDelay();
1603 Time middle_start = Time::Now();
1605 ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1606 entry->Close();
1607 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1608 entry->Close();
1609 FlushQueueForTest();
1611 AddDelay();
1612 Time middle_end = Time::Now();
1614 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1615 entry->Close();
1616 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1617 entry->Close();
1618 FlushQueueForTest();
1620 AddDelay();
1621 Time final = Time::Now();
1623 ASSERT_EQ(4, cache_->GetEntryCount());
1624 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
1625 ASSERT_EQ(2, cache_->GetEntryCount());
1627 ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1628 entry->Close();
1630 EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
1631 ASSERT_EQ(1, cache_->GetEntryCount());
1633 ASSERT_EQ(net::OK, OpenEntry("first", &entry));
1634 entry->Close();
1637 TEST_F(DiskCacheBackendTest, DoomBetween) {
1638 BackendDoomBetween();
1641 TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
1642 SetNewEviction();
1643 BackendDoomBetween();
1646 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
1647 SetMemoryOnlyMode();
1648 BackendDoomBetween();
1651 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
1652 SetMemoryOnlyMode();
1653 base::Time start, end;
1654 InitSparseCache(&start, &end);
1655 DoomEntriesBetween(start, end);
1656 EXPECT_EQ(3, cache_->GetEntryCount());
1658 start = end;
1659 end = base::Time::Now();
1660 DoomEntriesBetween(start, end);
1661 EXPECT_EQ(1, cache_->GetEntryCount());
1664 TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
1665 base::Time start, end;
1666 InitSparseCache(&start, &end);
1667 DoomEntriesBetween(start, end);
1668 EXPECT_EQ(9, cache_->GetEntryCount());
1670 start = end;
1671 end = base::Time::Now();
1672 DoomEntriesBetween(start, end);
1673 EXPECT_EQ(3, cache_->GetEntryCount());
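// Loads the golden cache |name| (captured in the middle of a transaction),
// initializes the backend so the interrupted operation is rolled back, and
// then checks the on-disk structures for consistency.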
1676 void DiskCacheBackendTest::BackendTransaction(const std::string& name,
1677 int num_entries, bool load) {
1678 success_ = false;
1679 ASSERT_TRUE(CopyTestCache(name));
1680 DisableFirstCleanup();
1682 uint32 mask;
1683 if (load) {
1684 mask = 0xf;
1685 SetMaxSize(0x100000);
1686 } else {
1687 // Clear the settings from the previous run.
1688 mask = 0;
1689 SetMaxSize(0);
1691 SetMask(mask);
1693 InitCache();
1694 ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());
1696 std::string key("the first key");
1697 disk_cache::Entry* entry1;
1698 ASSERT_NE(net::OK, OpenEntry(key, &entry1));
1700 int actual = cache_->GetEntryCount();
1701 if (num_entries != actual) {
1702 ASSERT_TRUE(load);
1703     // If there is a heavy load, inserting an entry will make another entry on
1704     // the same hash bucket dirty, so two entries are removed.
1705 ASSERT_EQ(num_entries - 1, actual);
1708 cache_.reset();
1709 cache_impl_ = NULL;
1711 ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
1712 success_ = true;
1715 void DiskCacheBackendTest::BackendRecoverInsert() {
1716 // Tests with an empty cache.
1717 BackendTransaction("insert_empty1", 0, false);
1718 ASSERT_TRUE(success_) << "insert_empty1";
1719 BackendTransaction("insert_empty2", 0, false);
1720 ASSERT_TRUE(success_) << "insert_empty2";
1721 BackendTransaction("insert_empty3", 0, false);
1722 ASSERT_TRUE(success_) << "insert_empty3";
1724 // Tests with one entry on the cache.
1725 BackendTransaction("insert_one1", 1, false);
1726 ASSERT_TRUE(success_) << "insert_one1";
1727 BackendTransaction("insert_one2", 1, false);
1728 ASSERT_TRUE(success_) << "insert_one2";
1729 BackendTransaction("insert_one3", 1, false);
1730 ASSERT_TRUE(success_) << "insert_one3";
1732 // Tests with one hundred entries on the cache, tiny index.
1733 BackendTransaction("insert_load1", 100, true);
1734 ASSERT_TRUE(success_) << "insert_load1";
1735 BackendTransaction("insert_load2", 100, true);
1736 ASSERT_TRUE(success_) << "insert_load2";
1739 TEST_F(DiskCacheBackendTest, RecoverInsert) {
1740 BackendRecoverInsert();
1743 TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
1744 SetNewEviction();
1745 BackendRecoverInsert();
1748 void DiskCacheBackendTest::BackendRecoverRemove() {
1749 // Removing the only element.
1750 BackendTransaction("remove_one1", 0, false);
1751 ASSERT_TRUE(success_) << "remove_one1";
1752 BackendTransaction("remove_one2", 0, false);
1753 ASSERT_TRUE(success_) << "remove_one2";
1754 BackendTransaction("remove_one3", 0, false);
1755 ASSERT_TRUE(success_) << "remove_one3";
1757 // Removing the head.
1758 BackendTransaction("remove_head1", 1, false);
1759 ASSERT_TRUE(success_) << "remove_head1";
1760 BackendTransaction("remove_head2", 1, false);
1761 ASSERT_TRUE(success_) << "remove_head2";
1762 BackendTransaction("remove_head3", 1, false);
1763 ASSERT_TRUE(success_) << "remove_head3";
1765 // Removing the tail.
1766 BackendTransaction("remove_tail1", 1, false);
1767 ASSERT_TRUE(success_) << "remove_tail1";
1768 BackendTransaction("remove_tail2", 1, false);
1769 ASSERT_TRUE(success_) << "remove_tail2";
1770 BackendTransaction("remove_tail3", 1, false);
1771 ASSERT_TRUE(success_) << "remove_tail3";
1773 // Removing with one hundred entries on the cache, tiny index.
1774 BackendTransaction("remove_load1", 100, true);
1775 ASSERT_TRUE(success_) << "remove_load1";
1776 BackendTransaction("remove_load2", 100, true);
1777 ASSERT_TRUE(success_) << "remove_load2";
1778 BackendTransaction("remove_load3", 100, true);
1779 ASSERT_TRUE(success_) << "remove_load3";
1781 // This case cannot be reverted.
1782 BackendTransaction("remove_one4", 0, false);
1783 ASSERT_TRUE(success_) << "remove_one4";
1784 BackendTransaction("remove_head4", 1, false);
1785 ASSERT_TRUE(success_) << "remove_head4";
1788 #if defined(OS_WIN)
1789 // http://crbug.com/396392
1790 #define MAYBE_RecoverRemove DISABLED_RecoverRemove
1791 #else
1792 #define MAYBE_RecoverRemove RecoverRemove
1793 #endif
1794 TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
1795 BackendRecoverRemove();
1798 #if defined(OS_WIN)
1799 // http://crbug.com/396392
1800 #define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
1801 #else
1802 #define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
1803 #endif
1804 TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
1805 SetNewEviction();
1806 BackendRecoverRemove();
1809 void DiskCacheBackendTest::BackendRecoverWithEviction() {
1810 success_ = false;
1811 ASSERT_TRUE(CopyTestCache("insert_load1"));
1812 DisableFirstCleanup();
1814 SetMask(0xf);
1815 SetMaxSize(0x1000);
1817 // We should not crash here.
1818 InitCache();
1819 DisableIntegrityCheck();
1822 TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
1823 BackendRecoverWithEviction();
1826 TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
1827 SetNewEviction();
1828 BackendRecoverWithEviction();
1831 // Tests that the |BackendImpl| fails to start with the wrong cache version.
1832 TEST_F(DiskCacheTest, WrongVersion) {
1833 ASSERT_TRUE(CopyTestCache("wrong_version"));
1834 base::Thread cache_thread("CacheThread");
1835 ASSERT_TRUE(cache_thread.StartWithOptions(
1836 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1837 net::TestCompletionCallback cb;
1839 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1840 cache_path_, cache_thread.task_runner(), NULL));
1841 int rv = cache->Init(cb.callback());
1842 ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
1845 // Tests that the disk cache successfully joins the control group, dropping the
1846 // existing cache in favor of a new empty cache.
1847 // Disabled on Android since this test requires the cache creator to create
1848 // blockfile caches.
1849 #if !defined(OS_ANDROID)
1850 TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
1851 base::Thread cache_thread("CacheThread");
1852 ASSERT_TRUE(cache_thread.StartWithOptions(
1853 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1855 scoped_ptr<disk_cache::BackendImpl> cache =
1856 CreateExistingEntryCache(cache_thread, cache_path_);
1857 ASSERT_TRUE(cache.get());
1858 cache.reset();
1860 // Instantiate the SimpleCacheTrial, forcing this run into the
1861 // ExperimentControl group.
1862 base::FieldTrialList field_trial_list(new base::MockEntropyProvider());
1863 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1864 "ExperimentControl");
1865 net::TestCompletionCallback cb;
1866 scoped_ptr<disk_cache::Backend> base_cache;
1867 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1868 net::CACHE_BACKEND_BLOCKFILE,
1869 cache_path_,
1870 0,
1871 true,
1872 cache_thread.task_runner(),
1873 NULL,
1874 &base_cache,
1875 cb.callback());
1876 ASSERT_EQ(net::OK, cb.GetResult(rv));
1877 EXPECT_EQ(0, base_cache->GetEntryCount());
1879 #endif
1881 // Tests that the disk cache can restart in the control group preserving
1882 // existing entries.
1883 TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
1884 // Instantiate the SimpleCacheTrial, forcing this run into the
1885 // ExperimentControl group.
1886 base::FieldTrialList field_trial_list(new base::MockEntropyProvider());
1887 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1888 "ExperimentControl");
1890 base::Thread cache_thread("CacheThread");
1891 ASSERT_TRUE(cache_thread.StartWithOptions(
1892 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1894 scoped_ptr<disk_cache::BackendImpl> cache =
1895 CreateExistingEntryCache(cache_thread, cache_path_);
1896 ASSERT_TRUE(cache.get());
1898 net::TestCompletionCallback cb;
1900 const int kRestartCount = 5;
1901 for (int i = 0; i < kRestartCount; ++i) {
1902 cache.reset(new disk_cache::BackendImpl(
1903 cache_path_, cache_thread.message_loop_proxy(), NULL));
1904 int rv = cache->Init(cb.callback());
1905 ASSERT_EQ(net::OK, cb.GetResult(rv));
1906 EXPECT_EQ(1, cache->GetEntryCount());
1908 disk_cache::Entry* entry = NULL;
1909 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1910 EXPECT_EQ(net::OK, cb.GetResult(rv));
1911 EXPECT_TRUE(entry);
1912 entry->Close();
1916 // Tests that the disk cache can leave the control group preserving existing
1917 // entries.
1918 TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
1919 base::Thread cache_thread("CacheThread");
1920 ASSERT_TRUE(cache_thread.StartWithOptions(
1921 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1924 // Instantiate the SimpleCacheTrial, forcing this run into the
1925 // ExperimentControl group.
1926 base::FieldTrialList field_trial_list(new base::MockEntropyProvider());
1927 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1928 "ExperimentControl");
1930 scoped_ptr<disk_cache::BackendImpl> cache =
1931 CreateExistingEntryCache(cache_thread, cache_path_);
1932 ASSERT_TRUE(cache.get());
1935 // Instantiate the SimpleCacheTrial, forcing this run into the
1936 // ExperimentNo group.
1937 base::FieldTrialList field_trial_list(new base::MockEntropyProvider());
1938 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1939 net::TestCompletionCallback cb;
1941 const int kRestartCount = 5;
1942 for (int i = 0; i < kRestartCount; ++i) {
1943 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1944 cache_path_, cache_thread.message_loop_proxy(), NULL));
1945 int rv = cache->Init(cb.callback());
1946 ASSERT_EQ(net::OK, cb.GetResult(rv));
1947 EXPECT_EQ(1, cache->GetEntryCount());
1949 disk_cache::Entry* entry = NULL;
1950 rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1951 EXPECT_EQ(net::OK, cb.GetResult(rv));
1952 EXPECT_TRUE(entry);
1953 entry->Close();
1957 // Tests that the cache is properly restarted on recovery error.
1958 // Disabled on Android since this test requires the cache creator to create
1959 // blockfile caches.
1960 #if !defined(OS_ANDROID)
1961 TEST_F(DiskCacheBackendTest, DeleteOld) {
1962 ASSERT_TRUE(CopyTestCache("wrong_version"));
1963 SetNewEviction();
1964 base::Thread cache_thread("CacheThread");
1965 ASSERT_TRUE(cache_thread.StartWithOptions(
1966 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1968 net::TestCompletionCallback cb;
1969 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
1970 base::FilePath path(cache_path_);
1971 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1972 net::CACHE_BACKEND_BLOCKFILE,
1973 path,
1974 0,
1975 true,
1976 cache_thread.task_runner(),
1977 NULL,
1978 &cache_,
1979 cb.callback());
1980 path.clear(); // Make sure path was captured by the previous call.
1981 ASSERT_EQ(net::OK, cb.GetResult(rv));
1982 base::ThreadRestrictions::SetIOAllowed(prev);
1983 cache_.reset();
1984 EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
1986 #endif
1988 // We want to be able to deal with messed up entries on disk.
1989 void DiskCacheBackendTest::BackendInvalidEntry2() {
1990 ASSERT_TRUE(CopyTestCache("bad_entry"));
1991 DisableFirstCleanup();
1992 InitCache();
1994 disk_cache::Entry *entry1, *entry2;
1995 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
1996 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
1997 entry1->Close();
1999 // CheckCacheIntegrity will fail at this point.
2000 DisableIntegrityCheck();
2003 TEST_F(DiskCacheBackendTest, InvalidEntry2) {
2004 BackendInvalidEntry2();
2007 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
2008 SetNewEviction();
2009 BackendInvalidEntry2();
2012 // Tests that we don't crash or hang when enumerating this cache.
2013 void DiskCacheBackendTest::BackendInvalidEntry3() {
2014 SetMask(0x1); // 2-entry table.
2015 SetMaxSize(0x3000); // 12 kB.
2016 DisableFirstCleanup();
2017 InitCache();
2019 disk_cache::Entry* entry;
2020 scoped_ptr<TestIterator> iter = CreateIterator();
2021 while (iter->OpenNextEntry(&entry) == net::OK) {
2022 entry->Close();
2026 TEST_F(DiskCacheBackendTest, InvalidEntry3) {
2027 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2028 BackendInvalidEntry3();
2031 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
2032 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2033 SetNewEviction();
2034 BackendInvalidEntry3();
2035 DisableIntegrityCheck();
2038 // Test that we handle a dirty entry on the LRU list, already replaced with
2039 // the same key, and with hash collisions.
2040 TEST_F(DiskCacheBackendTest, InvalidEntry4) {
2041 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2042 SetMask(0x1); // 2-entry table.
2043 SetMaxSize(0x3000); // 12 kB.
2044 DisableFirstCleanup();
2045 InitCache();
2047 TrimForTest(false);
2050 // Test that we handle a dirty entry on the deleted list, already replaced with
2051 // the same key, and with hash collisions.
2052 TEST_F(DiskCacheBackendTest, InvalidEntry5) {
2053 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2054 SetNewEviction();
2055 SetMask(0x1); // 2-entry table.
2056 SetMaxSize(0x3000); // 12 kB.
2057 DisableFirstCleanup();
2058 InitCache();
2060 TrimDeletedListForTest(false);
2063 TEST_F(DiskCacheBackendTest, InvalidEntry6) {
2064 ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2065 SetMask(0x1); // 2-entry table.
2066 SetMaxSize(0x3000); // 12 kB.
2067 DisableFirstCleanup();
2068 InitCache();
2070 // There is a dirty entry (but marked as clean) at the end, pointing to a
2071 // deleted entry through the hash collision list. We should not re-insert the
2072 // deleted entry into the index table.
2074 TrimForTest(false);
2075 // The cache should be clean (as detected by CheckCacheIntegrity).
2078 // Tests that we don't hang when there is a loop on the hash collision list.
2079 // The test cache could be a result of bug 69135.
2080 TEST_F(DiskCacheBackendTest, BadNextEntry1) {
2081 ASSERT_TRUE(CopyTestCache("list_loop2"));
2082 SetMask(0x1); // 2-entry table.
2083 SetMaxSize(0x3000); // 12 kB.
2084 DisableFirstCleanup();
2085 InitCache();
2087 // The second entry points at itself, and the first entry is not accessible
2088 // through the index, but it is at the head of the LRU.
2090 disk_cache::Entry* entry;
2091 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
2092 entry->Close();
2094 TrimForTest(false);
2095 TrimForTest(false);
2096 ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
2097 entry->Close();
2098 EXPECT_EQ(1, cache_->GetEntryCount());
2101 // Tests that we don't hang when there is a loop on the hash collision list.
2102 // The test cache could be a result of bug 69135.
2103 TEST_F(DiskCacheBackendTest, BadNextEntry2) {
2104 ASSERT_TRUE(CopyTestCache("list_loop3"));
2105 SetMask(0x1); // 2-entry table.
2106 SetMaxSize(0x3000); // 12 kB.
2107 DisableFirstCleanup();
2108 InitCache();
2110 // There is a wide loop of 5 entries.
2112 disk_cache::Entry* entry;
2113 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
2116 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2117 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2118 DisableFirstCleanup();
2119 SetNewEviction();
2120 InitCache();
2122 // The second entry is dirty, but removing it should not corrupt the list.
2123 disk_cache::Entry* entry;
2124 ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2125 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2127 // This should not delete the cache.
2128 entry->Doom();
2129 FlushQueueForTest();
2130 entry->Close();
2132 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2133 entry->Close();
2136 // Tests handling of corrupt entries by keeping the rankings node around, with
2137 // a fatal failure.
2138 void DiskCacheBackendTest::BackendInvalidEntry7() {
2139 const int kSize = 0x3000; // 12 kB.
2140 SetMaxSize(kSize * 10);
2141 InitCache();
2143 std::string first("some key");
2144 std::string second("something else");
2145 disk_cache::Entry* entry;
2146 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2147 entry->Close();
2148 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2150 // Corrupt this entry.
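// Down-casting to EntryImpl exposes the on-disk rankings node; zeroing its
// |next| pointer and calling Store() writes a broken LRU link to disk, which
// the backend is expected to treat as a fatal failure when the entry is next
// opened or enumerated (see the expectations below).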
2151 disk_cache::EntryImpl* entry_impl =
2152 static_cast<disk_cache::EntryImpl*>(entry);
2154 entry_impl->rankings()->Data()->next = 0;
2155 entry_impl->rankings()->Store();
2156 entry->Close();
2157 FlushQueueForTest();
2158 EXPECT_EQ(2, cache_->GetEntryCount());
2160 // This should detect the bad entry.
2161 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2162 EXPECT_EQ(1, cache_->GetEntryCount());
2164 // We should delete the cache. The list still has a corrupt node.
2165 scoped_ptr<TestIterator> iter = CreateIterator();
2166 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2167 FlushQueueForTest();
2168 EXPECT_EQ(0, cache_->GetEntryCount());
2171 TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2172 BackendInvalidEntry7();
2175 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2176 SetNewEviction();
2177 BackendInvalidEntry7();
2180 // Tests handling of corrupt entries by keeping the rankings node around, with
2181 // a non-fatal failure.
2182 void DiskCacheBackendTest::BackendInvalidEntry8() {
2183 const int kSize = 0x3000; // 12 kB
2184 SetMaxSize(kSize * 10);
2185 InitCache();
2187 std::string first("some key");
2188 std::string second("something else");
2189 disk_cache::Entry* entry;
2190 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2191 entry->Close();
2192 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2194 // Corrupt this entry.
2195 disk_cache::EntryImpl* entry_impl =
2196 static_cast<disk_cache::EntryImpl*>(entry);
2198 entry_impl->rankings()->Data()->contents = 0;
2199 entry_impl->rankings()->Store();
2200 entry->Close();
2201 FlushQueueForTest();
2202 EXPECT_EQ(2, cache_->GetEntryCount());
2204 // This should detect the bad entry.
2205 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2206 EXPECT_EQ(1, cache_->GetEntryCount());
2208 // We should not delete the cache.
2209 scoped_ptr<TestIterator> iter = CreateIterator();
2210 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2211 entry->Close();
2212 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2213 EXPECT_EQ(1, cache_->GetEntryCount());
2216 TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2217 BackendInvalidEntry8();
2220 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2221 SetNewEviction();
2222 BackendInvalidEntry8();
2225 // Tests handling of corrupt entries detected by enumerations. Note that these
2226 // tests (xx9 to xx11) are basically just going through slightly different
2227 // codepaths so they are tightly coupled with the code, but that is better than
2228 // not testing error handling code.
2229 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2230 const int kSize = 0x3000; // 12 kB.
2231 SetMaxSize(kSize * 10);
2232 InitCache();
2234 std::string first("some key");
2235 std::string second("something else");
2236 disk_cache::Entry* entry;
2237 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2238 entry->Close();
2239 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2241 // Corrupt this entry.
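// Here the corruption is written into the entry block itself: |state| is set
// to a bogus value, so the entry looks invalid to the later enumeration and
// eviction passes (the two branches exercised below).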
2242 disk_cache::EntryImpl* entry_impl =
2243 static_cast<disk_cache::EntryImpl*>(entry);
2245 entry_impl->entry()->Data()->state = 0xbad;
2246 entry_impl->entry()->Store();
2247 entry->Close();
2248 FlushQueueForTest();
2249 EXPECT_EQ(2, cache_->GetEntryCount());
2251 if (eviction) {
2252 TrimForTest(false);
2253 EXPECT_EQ(1, cache_->GetEntryCount());
2254 TrimForTest(false);
2255 EXPECT_EQ(1, cache_->GetEntryCount());
2256 } else {
2257 // We should detect the problem through the list, but we should not delete
2258 // the entry, just fail the iteration.
2259 scoped_ptr<TestIterator> iter = CreateIterator();
2260 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2262 // Now a full iteration will work, and return one entry.
2263 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2264 entry->Close();
2265 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2267 // This should detect what's left of the bad entry.
2268 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2269 EXPECT_EQ(2, cache_->GetEntryCount());
2271 DisableIntegrityCheck();
2274 TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2275 BackendInvalidEntry9(false);
2278 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2279 SetNewEviction();
2280 BackendInvalidEntry9(false);
2283 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2284 BackendInvalidEntry9(true);
2287 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2288 SetNewEviction();
2289 BackendInvalidEntry9(true);
2292 // Tests handling of corrupt entries detected by enumerations.
2293 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2294 const int kSize = 0x3000; // 12 kB.
2295 SetMaxSize(kSize * 10);
2296 SetNewEviction();
2297 InitCache();
2299 std::string first("some key");
2300 std::string second("something else");
2301 disk_cache::Entry* entry;
2302 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2303 entry->Close();
2304 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2305 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2306 entry->Close();
2307 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2309 // Corrupt this entry.
2310 disk_cache::EntryImpl* entry_impl =
2311 static_cast<disk_cache::EntryImpl*>(entry);
2313 entry_impl->entry()->Data()->state = 0xbad;
2314 entry_impl->entry()->Store();
2315 entry->Close();
2316 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2317 entry->Close();
2318 EXPECT_EQ(3, cache_->GetEntryCount());
2320 // We have:
2321 // List 0: third -> second (bad).
2322 // List 1: first.
2324 if (eviction) {
2325 // Detection order: second -> first -> third.
2326 TrimForTest(false);
2327 EXPECT_EQ(3, cache_->GetEntryCount());
2328 TrimForTest(false);
2329 EXPECT_EQ(2, cache_->GetEntryCount());
2330 TrimForTest(false);
2331 EXPECT_EQ(1, cache_->GetEntryCount());
2332 } else {
2333 // Detection order: third -> second -> first.
2334 // We should detect the problem through the list, but we should not delete
2335 // the entry.
2336 scoped_ptr<TestIterator> iter = CreateIterator();
2337 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2338 entry->Close();
2339 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2340 EXPECT_EQ(first, entry->GetKey());
2341 entry->Close();
2342 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2344 DisableIntegrityCheck();
2347 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2348 BackendInvalidEntry10(false);
2351 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2352 BackendInvalidEntry10(true);
2355 // Tests handling of corrupt entries detected by enumerations.
2356 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2357 const int kSize = 0x3000; // 12 kB.
2358 SetMaxSize(kSize * 10);
2359 SetNewEviction();
2360 InitCache();
2362 std::string first("some key");
2363 std::string second("something else");
2364 disk_cache::Entry* entry;
2365 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2366 entry->Close();
2367 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2368 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2369 entry->Close();
2370 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2371 entry->Close();
2372 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2373 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2375 // Corrupt this entry.
2376 disk_cache::EntryImpl* entry_impl =
2377 static_cast<disk_cache::EntryImpl*>(entry);
2379 entry_impl->entry()->Data()->state = 0xbad;
2380 entry_impl->entry()->Store();
2381 entry->Close();
2382 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2383 entry->Close();
2384 FlushQueueForTest();
2385 EXPECT_EQ(3, cache_->GetEntryCount());
2387 // We have:
2388 // List 0: third.
2389 // List 1: second (bad) -> first.
2391 if (eviction) {
2392 // Detection order: third -> first -> second.
2393 TrimForTest(false);
2394 EXPECT_EQ(2, cache_->GetEntryCount());
2395 TrimForTest(false);
2396 EXPECT_EQ(1, cache_->GetEntryCount());
2397 TrimForTest(false);
2398 EXPECT_EQ(1, cache_->GetEntryCount());
2399 } else {
2400 // Detection order: third -> second.
2401 // We should detect the problem through the list, but we should not delete
2402 // the entry, just fail the iteration.
2403 scoped_ptr<TestIterator> iter = CreateIterator();
2404 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2405 entry->Close();
2406 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2408 // Now a full iteration will work, and return two entries.
2409 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2410 entry->Close();
2411 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2412 entry->Close();
2413 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2415 DisableIntegrityCheck();
2418 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2419 BackendInvalidEntry11(false);
2422 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2423 BackendInvalidEntry11(true);
2426 // Tests handling of corrupt entries in the middle of a long eviction run.
2427 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2428 const int kSize = 0x3000; // 12 kB
2429 SetMaxSize(kSize * 10);
2430 InitCache();
2432 std::string first("some key");
2433 std::string second("something else");
2434 disk_cache::Entry* entry;
2435 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2436 entry->Close();
2437 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2439 // Corrupt this entry.
2440 disk_cache::EntryImpl* entry_impl =
2441 static_cast<disk_cache::EntryImpl*>(entry);
2443 entry_impl->entry()->Data()->state = 0xbad;
2444 entry_impl->entry()->Store();
2445 entry->Close();
2446 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2447 entry->Close();
2448 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2449 TrimForTest(true);
2450 EXPECT_EQ(1, cache_->GetEntryCount());
2451 entry->Close();
2452 DisableIntegrityCheck();
2455 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2456 BackendTrimInvalidEntry12();
2459 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2460 SetNewEviction();
2461 BackendTrimInvalidEntry12();
2464 // We want to be able to deal with messed up entries on disk.
2465 void DiskCacheBackendTest::BackendInvalidRankings2() {
2466 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2467 DisableFirstCleanup();
2468 InitCache();
2470 disk_cache::Entry *entry1, *entry2;
2471 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2472 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2473 entry2->Close();
2475 // CheckCacheIntegrity will fail at this point.
2476 DisableIntegrityCheck();
2479 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2480 BackendInvalidRankings2();
2483 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2484 SetNewEviction();
2485 BackendInvalidRankings2();
2488 // If the LRU is corrupt, we delete the cache.
2489 void DiskCacheBackendTest::BackendInvalidRankings() {
2490 disk_cache::Entry* entry;
2491 scoped_ptr<TestIterator> iter = CreateIterator();
2492 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2493 entry->Close();
2494 EXPECT_EQ(2, cache_->GetEntryCount());
2496 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2497 FlushQueueForTest(); // Allow the restart to finish.
2498 EXPECT_EQ(0, cache_->GetEntryCount());
2501 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2502 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2503 DisableFirstCleanup();
2504 InitCache();
2505 BackendInvalidRankings();
2508 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2509 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2510 DisableFirstCleanup();
2511 SetNewEviction();
2512 InitCache();
2513 BackendInvalidRankings();
2516 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2517 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2518 DisableFirstCleanup();
2519 InitCache();
2520 SetTestMode(); // Fail cache reinitialization.
2521 BackendInvalidRankings();
2524 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2525 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2526 DisableFirstCleanup();
2527 SetNewEviction();
2528 InitCache();
2529 SetTestMode(); // Fail cache reinitialization.
2530 BackendInvalidRankings();
2533 // If the LRU is corrupt and we have open entries, we disable the cache.
2534 void DiskCacheBackendTest::BackendDisable() {
2535 disk_cache::Entry *entry1, *entry2;
2536 scoped_ptr<TestIterator> iter = CreateIterator();
2537 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2539 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2540 EXPECT_EQ(0, cache_->GetEntryCount());
2541 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2543 entry1->Close();
2544 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2545 FlushQueueForTest(); // This one actually allows that task to complete.
2547 EXPECT_EQ(0, cache_->GetEntryCount());
2550 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2551 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2552 DisableFirstCleanup();
2553 InitCache();
2554 BackendDisable();
2557 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2558 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2559 DisableFirstCleanup();
2560 SetNewEviction();
2561 InitCache();
2562 BackendDisable();
2565 TEST_F(DiskCacheBackendTest, DisableFailure) {
2566 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2567 DisableFirstCleanup();
2568 InitCache();
2569 SetTestMode(); // Fail cache reinitialization.
2570 BackendDisable();
2573 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2574 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2575 DisableFirstCleanup();
2576 SetNewEviction();
2577 InitCache();
2578 SetTestMode(); // Fail cache reinitialization.
2579 BackendDisable();
2582 // This is another type of corruption on the LRU; disable the cache.
2583 void DiskCacheBackendTest::BackendDisable2() {
2584 EXPECT_EQ(8, cache_->GetEntryCount());
2586 disk_cache::Entry* entry;
2587 scoped_ptr<TestIterator> iter = CreateIterator();
2588 int count = 0;
2589 while (iter->OpenNextEntry(&entry) == net::OK) {
2590 ASSERT_TRUE(NULL != entry);
2591 entry->Close();
2592 count++;
2593 ASSERT_LT(count, 9);
2596 FlushQueueForTest();
2597 EXPECT_EQ(0, cache_->GetEntryCount());
2600 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2601 ASSERT_TRUE(CopyTestCache("list_loop"));
2602 DisableFirstCleanup();
2603 InitCache();
2604 BackendDisable2();
2607 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2608 ASSERT_TRUE(CopyTestCache("list_loop"));
2609 DisableFirstCleanup();
2610 SetNewEviction();
2611 InitCache();
2612 BackendDisable2();
2615 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2616 ASSERT_TRUE(CopyTestCache("list_loop"));
2617 DisableFirstCleanup();
2618 InitCache();
2619 SetTestMode(); // Fail cache reinitialization.
2620 BackendDisable2();
2623 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2624 ASSERT_TRUE(CopyTestCache("list_loop"));
2625 DisableFirstCleanup();
2626 SetNewEviction();
2627 InitCache();
2628 SetTestMode(); // Fail cache reinitialization.
2629 BackendDisable2();
2632 // If the index size changes when we disable the cache, we should not crash.
2633 void DiskCacheBackendTest::BackendDisable3() {
2634 disk_cache::Entry *entry1, *entry2;
2635 scoped_ptr<TestIterator> iter = CreateIterator();
2636 EXPECT_EQ(2, cache_->GetEntryCount());
2637 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2638 entry1->Close();
2640 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2641 FlushQueueForTest();
2643 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2644 entry2->Close();
2646 EXPECT_EQ(1, cache_->GetEntryCount());
2649 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2650 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2651 DisableFirstCleanup();
2652 SetMaxSize(20 * 1024 * 1024);
2653 InitCache();
2654 BackendDisable3();
2657 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2658 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2659 DisableFirstCleanup();
2660 SetMaxSize(20 * 1024 * 1024);
2661 SetNewEviction();
2662 InitCache();
2663 BackendDisable3();
2666 // If we disable the cache, already open entries should work as far as possible.
2667 void DiskCacheBackendTest::BackendDisable4() {
2668 disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2669 scoped_ptr<TestIterator> iter = CreateIterator();
2670 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2672 char key2[2000];
2673 char key3[20000];
2674 CacheTestFillBuffer(key2, sizeof(key2), true);
2675 CacheTestFillBuffer(key3, sizeof(key3), true);
2676 key2[sizeof(key2) - 1] = '\0';
2677 key3[sizeof(key3) - 1] = '\0';
2678 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2679 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2681 const int kBufSize = 20000;
2682 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2683 memset(buf->data(), 0, kBufSize);
2684 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2685 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2687 // This line should disable the cache but not delete it.
2688 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4));
2689 EXPECT_EQ(0, cache_->GetEntryCount());
2691 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
2693 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2694 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2695 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2697 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2698 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2699 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2701 std::string key = entry2->GetKey();
2702 EXPECT_EQ(sizeof(key2) - 1, key.size());
2703 key = entry3->GetKey();
2704 EXPECT_EQ(sizeof(key3) - 1, key.size());
2706 entry1->Close();
2707 entry2->Close();
2708 entry3->Close();
2709 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2710 FlushQueueForTest(); // This one actually allows that task to complete.
2712 EXPECT_EQ(0, cache_->GetEntryCount());
2715 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2716 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2717 DisableFirstCleanup();
2718 InitCache();
2719 BackendDisable4();
2722 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2723 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2724 DisableFirstCleanup();
2725 SetNewEviction();
2726 InitCache();
2727 BackendDisable4();
2730 // Tests the exposed API with a disabled cache.
2731 void DiskCacheBackendTest::BackendDisabledAPI() {
2732 cache_impl_->SetUnitTestMode(); // Simulate failure restarting the cache.
2734 disk_cache::Entry* entry1, *entry2;
2735 scoped_ptr<TestIterator> iter = CreateIterator();
2736 EXPECT_EQ(2, cache_->GetEntryCount());
2737 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2738 entry1->Close();
2739 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2740 FlushQueueForTest();
2741 // The cache should be disabled.
2743 EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
2744 EXPECT_EQ(0, cache_->GetEntryCount());
2745 EXPECT_NE(net::OK, OpenEntry("First", &entry2));
2746 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2747 EXPECT_NE(net::OK, DoomEntry("First"));
2748 EXPECT_NE(net::OK, DoomAllEntries());
2749 EXPECT_NE(net::OK, DoomEntriesBetween(Time(), Time::Now()));
2750 EXPECT_NE(net::OK, DoomEntriesSince(Time()));
2751 iter = CreateIterator();
2752 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2754 base::StringPairs stats;
2755 cache_->GetStats(&stats);
2756 EXPECT_TRUE(stats.empty());
2757 cache_->OnExternalCacheHit("First");
2760 TEST_F(DiskCacheBackendTest, DisabledAPI) {
2761 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2762 DisableFirstCleanup();
2763 InitCache();
2764 BackendDisabledAPI();
2767 TEST_F(DiskCacheBackendTest, NewEvictionDisabledAPI) {
2768 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2769 DisableFirstCleanup();
2770 SetNewEviction();
2771 InitCache();
2772 BackendDisabledAPI();
2775 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2776 MessageLoopHelper helper;
2778 ASSERT_TRUE(CleanupCacheDir());
2779 scoped_ptr<disk_cache::BackendImpl> cache;
2780 cache.reset(new disk_cache::BackendImpl(
2781 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2782 ASSERT_TRUE(NULL != cache.get());
2783 cache->SetUnitTestMode();
2784 ASSERT_EQ(net::OK, cache->SyncInit());
2786 // Wait for a callback that never comes... about 2 secs :). The message loop
2787 // has to run to allow invocation of the usage timer.
2788 helper.WaitUntilCacheIoFinished(1);
2791 TEST_F(DiskCacheBackendTest, TimerNotCreated) {
2792 ASSERT_TRUE(CopyTestCache("wrong_version"));
2794 scoped_ptr<disk_cache::BackendImpl> cache;
2795 cache.reset(new disk_cache::BackendImpl(
2796 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2797 ASSERT_TRUE(NULL != cache.get());
2798 cache->SetUnitTestMode();
2799 ASSERT_NE(net::OK, cache->SyncInit());
2801 ASSERT_TRUE(NULL == cache->GetTimerForTest());
2803 DisableIntegrityCheck();
2806 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2807 InitCache();
2808 disk_cache::Entry* entry;
2809 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2810 entry->Close();
2811 FlushQueueForTest();
2813 disk_cache::StatsItems stats;
2814 cache_->GetStats(&stats);
2815 EXPECT_FALSE(stats.empty());
2817 disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2818 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2820 cache_.reset();
2822 // Now open the cache and verify that the stats are still there.
2823 DisableFirstCleanup();
2824 InitCache();
2825 EXPECT_EQ(1, cache_->GetEntryCount());
2827 stats.clear();
2828 cache_->GetStats(&stats);
2829 EXPECT_FALSE(stats.empty());
2831 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2834 void DiskCacheBackendTest::BackendDoomAll() {
2835 InitCache();
2837 disk_cache::Entry *entry1, *entry2;
2838 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2839 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2840 entry1->Close();
2841 entry2->Close();
2843 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2844 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2846 ASSERT_EQ(4, cache_->GetEntryCount());
2847 EXPECT_EQ(net::OK, DoomAllEntries());
2848 ASSERT_EQ(0, cache_->GetEntryCount());
2850 // We should stop posting tasks at some point (if we post any).
2851 base::MessageLoop::current()->RunUntilIdle();
2853 disk_cache::Entry *entry3, *entry4;
2854 EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2855 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2856 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2858 EXPECT_EQ(net::OK, DoomAllEntries());
2859 ASSERT_EQ(0, cache_->GetEntryCount());
2861 entry1->Close();
2862 entry2->Close();
2863 entry3->Doom(); // The entry should be already doomed, but this must work.
2864 entry3->Close();
2865 entry4->Close();
2867 // Now try with all references released.
2868 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2869 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2870 entry1->Close();
2871 entry2->Close();
2873 ASSERT_EQ(2, cache_->GetEntryCount());
2874 EXPECT_EQ(net::OK, DoomAllEntries());
2875 ASSERT_EQ(0, cache_->GetEntryCount());
2877 EXPECT_EQ(net::OK, DoomAllEntries());
2880 TEST_F(DiskCacheBackendTest, DoomAll) {
2881 BackendDoomAll();
2884 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2885 SetNewEviction();
2886 BackendDoomAll();
2889 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2890 SetMemoryOnlyMode();
2891 BackendDoomAll();
2894 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2895 SetCacheType(net::APP_CACHE);
2896 BackendDoomAll();
2899 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2900 SetCacheType(net::SHADER_CACHE);
2901 BackendDoomAll();
2904 // If the index size changes when we doom the cache, we should not crash.
2905 void DiskCacheBackendTest::BackendDoomAll2() {
2906 EXPECT_EQ(2, cache_->GetEntryCount());
2907 EXPECT_EQ(net::OK, DoomAllEntries());
2909 disk_cache::Entry* entry;
2910 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2911 entry->Close();
2913 EXPECT_EQ(1, cache_->GetEntryCount());
2916 TEST_F(DiskCacheBackendTest, DoomAll2) {
2917 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2918 DisableFirstCleanup();
2919 SetMaxSize(20 * 1024 * 1024);
2920 InitCache();
2921 BackendDoomAll2();
2924 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2925 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2926 DisableFirstCleanup();
2927 SetMaxSize(20 * 1024 * 1024);
2928 SetNewEviction();
2929 InitCache();
2930 BackendDoomAll2();
2933 // We should be able to create the same entry on multiple simultaneous instances
2934 // of the cache.
2935 TEST_F(DiskCacheTest, MultipleInstances) {
2936 base::ScopedTempDir store1, store2;
2937 ASSERT_TRUE(store1.CreateUniqueTempDir());
2938 ASSERT_TRUE(store2.CreateUniqueTempDir());
2940 base::Thread cache_thread("CacheThread");
2941 ASSERT_TRUE(cache_thread.StartWithOptions(
2942 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2943 net::TestCompletionCallback cb;
2945 const int kNumberOfCaches = 2;
2946 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2948 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
2949 net::CACHE_BACKEND_DEFAULT,
2950 store1.path(),
2951 0,
2952 false,
2953 cache_thread.task_runner(),
2954 NULL,
2955 &cache[0],
2956 cb.callback());
2957 ASSERT_EQ(net::OK, cb.GetResult(rv));
2958 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2959 net::CACHE_BACKEND_DEFAULT,
2960 store2.path(),
2961 0,
2962 false,
2963 cache_thread.task_runner(),
2964 NULL,
2965 &cache[1],
2966 cb.callback());
2967 ASSERT_EQ(net::OK, cb.GetResult(rv));
2969 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2971 std::string key("the first key");
2972 disk_cache::Entry* entry;
2973 for (int i = 0; i < kNumberOfCaches; i++) {
2974 rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2975 ASSERT_EQ(net::OK, cb.GetResult(rv));
2976 entry->Close();
2980 // Test the six regions of the curve that determines the max cache size.
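// Roughly, as derived from the expectations below: the preferred size starts
// at 80% of the available space, plateaus at the default cache size, grows
// again at about 10% of the available space, plateaus at 2.5x the default,
// grows once more at about 10%, and is finally capped at 4x the default size.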
2981 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2982 using disk_cache::kDefaultCacheSize;
2983 int64 large_size = kDefaultCacheSize;
2985 // Region 1: expected = available * 0.8
2986 EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
2987 disk_cache::PreferredCacheSize(large_size - 1));
2988 EXPECT_EQ(kDefaultCacheSize * 8 / 10,
2989 disk_cache::PreferredCacheSize(large_size));
2990 EXPECT_EQ(kDefaultCacheSize - 1,
2991 disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
2993 // Region 2: expected = default_size
2994 EXPECT_EQ(kDefaultCacheSize,
2995 disk_cache::PreferredCacheSize(large_size * 10 / 8));
2996 EXPECT_EQ(kDefaultCacheSize,
2997 disk_cache::PreferredCacheSize(large_size * 10 - 1));
2999 // Region 3: expected = available * 0.1
3000 EXPECT_EQ(kDefaultCacheSize,
3001 disk_cache::PreferredCacheSize(large_size * 10));
3002 EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
3003 disk_cache::PreferredCacheSize(large_size * 25 - 1));
3005 // Region 4: expected = default_size * 2.5
3006 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3007 disk_cache::PreferredCacheSize(large_size * 25));
3008 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3009 disk_cache::PreferredCacheSize(large_size * 100 - 1));
3010 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3011 disk_cache::PreferredCacheSize(large_size * 100));
3012 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3013 disk_cache::PreferredCacheSize(large_size * 250 - 1));
3015 // Region 5: expected = available * 0.1
3016 int64 largest_size = kDefaultCacheSize * 4;
3017 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3018 disk_cache::PreferredCacheSize(large_size * 250));
3019 EXPECT_EQ(largest_size - 1,
3020 disk_cache::PreferredCacheSize(largest_size * 100 - 1));
3022 // Region 6: expected = largest possible size
3023 EXPECT_EQ(largest_size,
3024 disk_cache::PreferredCacheSize(largest_size * 100));
3025 EXPECT_EQ(largest_size,
3026 disk_cache::PreferredCacheSize(largest_size * 10000));
3029 // Tests that we can "migrate" a running instance from one experiment group to
3030 // another.
3031 TEST_F(DiskCacheBackendTest, Histograms) {
3032 InitCache();
3033 disk_cache::BackendImpl* backend_ = cache_impl_; // Needed by the macro.
3035 for (int i = 1; i < 3; i++) {
3036 CACHE_UMA(HOURS, "FillupTime", i, 28);
3040 // Make sure that we keep the total memory used by the internal buffers under
3041 // control.
3042 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
3043 InitCache();
3044 std::string key("the first key");
3045 disk_cache::Entry* entry;
3046 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3048 const int kSize = 200;
3049 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3050 CacheTestFillBuffer(buffer->data(), kSize, true);
3052 for (int i = 0; i < 10; i++) {
3053 SCOPED_TRACE(i);
3054 // Allocate 2MB for this entry.
3055 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
3056 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
3057 EXPECT_EQ(kSize,
3058 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
3059 EXPECT_EQ(kSize,
3060 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
3062 // Delete one of the buffers and truncate the other.
3063 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
3064 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
3066 // Delete the second buffer, writing 10 bytes to disk.
3067 entry->Close();
3068 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3071 entry->Close();
3072 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
3075 // This test assumes at least 150MB of system memory.
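// As the expectations below show, each successful IsAllocAllowed() call
// charges the requested bytes against the backend-wide buffer budget reported
// by GetTotalBuffersSize(), BufferDeleted() credits them back, and further
// requests are rejected once the budget is exhausted.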
3076 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
3077 InitCache();
3079 const int kOneMB = 1024 * 1024;
3080 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3081 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3083 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3084 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3086 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3087 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
3089 cache_impl_->BufferDeleted(kOneMB);
3090 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3092 // Check the upper limit.
3093 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3095 for (int i = 0; i < 30; i++)
3096 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
3098 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
3101 // Tests that sharing of external files works and we are able to delete the
3102 // files when we need to.
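// On Windows, the second CreateFile() call is expected to succeed only when
// FILE_SHARE_DELETE is requested, and the file should remain usable for reads
// and writes even after DeleteFile(), as the expectations below verify.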
3103 TEST_F(DiskCacheBackendTest, FileSharing) {
3104 InitCache();
3106 disk_cache::Addr address(0x80000001);
3107 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3108 base::FilePath name = cache_impl_->GetFileName(address);
3110 scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
3111 file->Init(name);
3113 #if defined(OS_WIN)
3114 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3115 DWORD access = GENERIC_READ | GENERIC_WRITE;
3116 base::win::ScopedHandle file2(CreateFile(
3117 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3118 EXPECT_FALSE(file2.IsValid());
3120 sharing |= FILE_SHARE_DELETE;
3121 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3122 OPEN_EXISTING, 0, NULL));
3123 EXPECT_TRUE(file2.IsValid());
3124 #endif
3126 EXPECT_TRUE(base::DeleteFile(name, false));
3128 // We should be able to use the file.
3129 const int kSize = 200;
3130 char buffer1[kSize];
3131 char buffer2[kSize];
3132 memset(buffer1, 't', kSize);
3133 memset(buffer2, 0, kSize);
3134 EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3135 EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3136 EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
3138 EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
3141 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3142 InitCache();
3144 disk_cache::Entry* entry;
3146 for (int i = 0; i < 2; ++i) {
3147 std::string key = base::StringPrintf("key%d", i);
3148 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3149 entry->Close();
3152 // Ping the oldest entry.
3153 cache_->OnExternalCacheHit("key0");
3155 TrimForTest(false);
3157 // Make sure the older key remains.
3158 EXPECT_EQ(1, cache_->GetEntryCount());
3159 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3160 entry->Close();
3163 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3164 SetCacheType(net::SHADER_CACHE);
3165 InitCache();
3167 disk_cache::Entry* entry;
3169 for (int i = 0; i < 2; ++i) {
3170 std::string key = base::StringPrintf("key%d", i);
3171 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3172 entry->Close();
3175 // Ping the oldest entry.
3176 cache_->OnExternalCacheHit("key0");
3178 TrimForTest(false);
3180 // Make sure the older key remains.
3181 EXPECT_EQ(1, cache_->GetEntryCount());
3182 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3183 entry->Close();
3186 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3187 SetCacheType(net::APP_CACHE);
3188 SetSimpleCacheMode();
3189 BackendShutdownWithPendingCreate(false);
3192 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3193 SetCacheType(net::APP_CACHE);
3194 SetSimpleCacheMode();
3195 BackendShutdownWithPendingFileIO(false);
3198 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3199 SetSimpleCacheMode();
3200 BackendBasics();
3203 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3204 SetCacheType(net::APP_CACHE);
3205 SetSimpleCacheMode();
3206 BackendBasics();
3209 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3210 SetSimpleCacheMode();
3211 BackendKeying();
3214 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3215 SetSimpleCacheMode();
3216 SetCacheType(net::APP_CACHE);
3217 BackendKeying();
3220 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3221 SetSimpleCacheMode();
3222 BackendSetSize();
3225 // MacOS has a default open file limit of 256 files, which is incompatible with
3226 // this simple cache test.
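// (The load tests below create many entries at once and, presumably because
// the simple cache keeps per-entry files open while entries are in use, they
// can exceed that limit; hence the SIMPLE_MAYBE_MACOS macro.)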
3227 #if defined(OS_MACOSX)
3228 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3229 #else
3230 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3231 #endif
3233 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3234 SetMaxSize(0x100000);
3235 SetSimpleCacheMode();
3236 BackendLoad();
3239 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3240 SetCacheType(net::APP_CACHE);
3241 SetSimpleCacheMode();
3242 SetMaxSize(0x100000);
3243 BackendLoad();
3246 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3247 SetSimpleCacheMode();
3248 BackendDoomRecent();
3251 // crbug.com/330926, crbug.com/370677
3252 TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
3253 SetSimpleCacheMode();
3254 BackendDoomBetween();
3257 TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3258 SetSimpleCacheMode();
3259 BackendDoomAll();
3262 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3263 SetCacheType(net::APP_CACHE);
3264 SetSimpleCacheMode();
3265 BackendDoomAll();
3268 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3269 SetSimpleCacheMode();
3270 InitCache();
3272 const char key[] = "the first key";
3273 disk_cache::Entry* entry = NULL;
3275 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3276 ASSERT_TRUE(entry != NULL);
3277 entry->Close();
3278 entry = NULL;
3280 // To make sure the file creation completed, we need to call open again so
3281 // that we block until the files have actually been created.
3282 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3283 ASSERT_TRUE(entry != NULL);
3284 entry->Close();
3285 entry = NULL;
3287 // Delete one of the files in the entry.
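// Each simple-cache entry is stored as kSimpleEntryFileCount separate files
// whose names come from simple_util::GetFilenameFromKeyAndFileIndex(), so
// removing file index 0 is enough to make the entry unopenable.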
3288 base::FilePath to_delete_file = cache_path_.AppendASCII(
3289 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3290 EXPECT_TRUE(base::PathExists(to_delete_file));
3291 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3293 // Failing to open the entry should delete the rest of these files.
3294 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3296 // Confirm the rest of the files are gone.
3297 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3298 base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3299 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3300 EXPECT_FALSE(base::PathExists(should_be_gone_file));
3304 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3305 SetSimpleCacheMode();
3306 InitCache();
3308 const char key[] = "the first key";
3309 disk_cache::Entry* entry = NULL;
3311 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3312 disk_cache::Entry* null = NULL;
3313 ASSERT_NE(null, entry);
3314 entry->Close();
3315 entry = NULL;
3317 // To make sure the file creation completed, we need to call open again so
3318 // that we block until the files have actually been created.
3319 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3320 ASSERT_NE(null, entry);
3321 entry->Close();
3322 entry = NULL;
3324 // The entry is being closed on the Simple Cache worker pool.
3325 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
3326 base::RunLoop().RunUntilIdle();
3328 // Write an invalid header for stream 0 and stream 1.
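// Overwriting the file with a SimpleFileHeader whose initial_magic_number is
// bogus should make the backend reject the entry, so the OpenEntry() call
// below is expected to fail.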
3329 base::FilePath entry_file1_path = cache_path_.AppendASCII(
3330 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3332 disk_cache::SimpleFileHeader header;
3333 header.initial_magic_number = GG_UINT64_C(0xbadf00d);
3334 EXPECT_EQ(
3335 implicit_cast<int>(sizeof(header)),
3336 base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3337 sizeof(header)));
3338 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3341 // Tests that the Simple Cache Backend fails to initialize with non-matching
3342 // file structure on disk.
3343 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3344 // Create a cache structure with the |BackendImpl|.
3345 InitCache();
3346 disk_cache::Entry* entry;
3347 const int kSize = 50;
3348 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3349 CacheTestFillBuffer(buffer->data(), kSize, false);
3350 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3351 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3352 entry->Close();
3353 cache_.reset();
3355 // Check that the |SimpleBackendImpl| does not favor this structure.
3356 base::Thread cache_thread("CacheThread");
3357 ASSERT_TRUE(cache_thread.StartWithOptions(
3358 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3359 disk_cache::SimpleBackendImpl* simple_cache =
3360 new disk_cache::SimpleBackendImpl(
3361 cache_path_, 0, net::DISK_CACHE, cache_thread.task_runner(), NULL);
3362 net::TestCompletionCallback cb;
3363 int rv = simple_cache->Init(cb.callback());
3364 EXPECT_NE(net::OK, cb.GetResult(rv));
3365 delete simple_cache;
3366 DisableIntegrityCheck();
3369 // Tests that the |BackendImpl| refuses to initialize on top of the files
3370 // generated by the Simple Cache Backend.
3371 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3372 // Create a cache structure with the |SimpleBackendImpl|.
3373 SetSimpleCacheMode();
3374 InitCache();
3375 disk_cache::Entry* entry;
3376 const int kSize = 50;
3377 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3378 CacheTestFillBuffer(buffer->data(), kSize, false);
3379 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3380 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3381 entry->Close();
3382 cache_.reset();
3384 // Check that the |BackendImpl| does not favor this structure.
3385 base::Thread cache_thread("CacheThread");
3386 ASSERT_TRUE(cache_thread.StartWithOptions(
3387 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3388 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3389 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL);
3390 cache->SetUnitTestMode();
3391 net::TestCompletionCallback cb;
3392 int rv = cache->Init(cb.callback());
3393 EXPECT_NE(net::OK, cb.GetResult(rv));
3394 delete cache;
3395 DisableIntegrityCheck();
3398 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3399 SetSimpleCacheMode();
3400 BackendFixEnumerators();
3403 // Tests basic functionality of the SimpleBackend implementation of the
3404 // enumeration API.
3405 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3406 SetSimpleCacheMode();
3407 InitCache();
3408 std::set<std::string> key_pool;
3409 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3411 // Check that enumeration returns all entries.
3412 std::set<std::string> keys_to_match(key_pool);
3413 scoped_ptr<TestIterator> iter = CreateIterator();
3414 size_t count = 0;
3415 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3416 iter.reset();
3417 EXPECT_EQ(key_pool.size(), count);
3418 EXPECT_TRUE(keys_to_match.empty());
3420 // Check that opening entries does not affect enumeration.
3421 keys_to_match = key_pool;
3422 iter = CreateIterator();
3423 count = 0;
3424 disk_cache::Entry* entry_opened_before;
3425 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3426 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3427 iter.get(),
3428 &keys_to_match,
3429 &count));
3431 disk_cache::Entry* entry_opened_middle;
3432 ASSERT_EQ(net::OK,
3433 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3434 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3435 iter.reset();
3436 entry_opened_before->Close();
3437 entry_opened_middle->Close();
3439 EXPECT_EQ(key_pool.size(), count);
3440 EXPECT_TRUE(keys_to_match.empty());
3443 // Tests that the enumerations are not affected by dooming an entry in the
3444 // middle.
3445 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3446 SetSimpleCacheMode();
3447 InitCache();
3448 std::set<std::string> key_pool;
3449 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3451 // Check that enumeration returns all entries but the doomed one.
3452 std::set<std::string> keys_to_match(key_pool);
3453 scoped_ptr<TestIterator> iter = CreateIterator();
3454 size_t count = 0;
3455 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3456 iter.get(),
3457 &keys_to_match,
3458 &count));
3460 std::string key_to_delete = *(keys_to_match.begin());
3461 DoomEntry(key_to_delete);
3462 keys_to_match.erase(key_to_delete);
3463 key_pool.erase(key_to_delete);
3464 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3465 iter.reset();
3467 EXPECT_EQ(key_pool.size(), count);
3468 EXPECT_TRUE(keys_to_match.empty());
3471 // Tests that enumerations are not affected by corrupt files.
3472 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3473 SetSimpleCacheMode();
3474 InitCache();
3475 std::set<std::string> key_pool;
3476 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3478 // Create a corrupt entry. The write/read sequence ensures that the entry will
3479 // have been created before corrupting the platform files, in the case of
3480 // optimistic operations.
3481 const std::string key = "the key";
3482 disk_cache::Entry* corrupted_entry;
3484 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3485 ASSERT_TRUE(corrupted_entry);
3486 const int kSize = 50;
3487 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3488 CacheTestFillBuffer(buffer->data(), kSize, false);
3489 ASSERT_EQ(kSize,
3490 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3491 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3492 corrupted_entry->Close();
3494 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3495 key, cache_path_));
3496 EXPECT_EQ(key_pool.size() + 1,
3497 implicit_cast<size_t>(cache_->GetEntryCount()));
3499 // Check that enumeration returns all entries but the corrupt one.
3500 std::set<std::string> keys_to_match(key_pool);
3501 scoped_ptr<TestIterator> iter = CreateIterator();
3502 size_t count = 0;
3503 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3504 iter.reset();
3506 EXPECT_EQ(key_pool.size(), count);
3507 EXPECT_TRUE(keys_to_match.empty());
3510 // Tests that enumerations don't leak memory when the backend is destructed
3511 // mid-enumeration.
3512 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
3513 SetSimpleCacheMode();
3514 InitCache();
3515 std::set<std::string> key_pool;
3516 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3518 scoped_ptr<TestIterator> iter = CreateIterator();
3519 disk_cache::Entry* entry = NULL;
3520 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
3521 EXPECT_TRUE(entry);
3522 disk_cache::ScopedEntryPtr entry_closer(entry);
3524 cache_.reset();
3525 // This test passes if we don't leak memory.
3528 // Tests that a SimpleCache doesn't crash when files are deleted very quickly
3529 // after closing.
3530 // NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
3531 TEST_F(DiskCacheBackendTest, SimpleCacheDeleteQuickly) {
3532 SetSimpleCacheMode();
3533 for (int i = 0; i < 100; ++i) {
3534 InitCache();
3535 cache_.reset();
3536 EXPECT_TRUE(CleanupCacheDir());