[Cronet] Delay StartNetLog and StopNetLog until native request context is initialized
[chromium-blink-merge.git] / net / disk_cache / backend_unittest.cc
blob5536f3774879d71e89851375a5c2030f98b7c4be
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/files/file_util.h"
7 #include "base/metrics/field_trial.h"
8 #include "base/port.h"
9 #include "base/run_loop.h"
10 #include "base/strings/string_util.h"
11 #include "base/strings/stringprintf.h"
12 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
13 #include "base/thread_task_runner_handle.h"
14 #include "base/threading/platform_thread.h"
15 #include "base/threading/thread_restrictions.h"
16 #include "net/base/cache_type.h"
17 #include "net/base/io_buffer.h"
18 #include "net/base/net_errors.h"
19 #include "net/base/test_completion_callback.h"
20 #include "net/disk_cache/blockfile/backend_impl.h"
21 #include "net/disk_cache/blockfile/entry_impl.h"
22 #include "net/disk_cache/blockfile/experiments.h"
23 #include "net/disk_cache/blockfile/histogram_macros.h"
24 #include "net/disk_cache/blockfile/mapped_file.h"
25 #include "net/disk_cache/cache_util.h"
26 #include "net/disk_cache/disk_cache_test_base.h"
27 #include "net/disk_cache/disk_cache_test_util.h"
28 #include "net/disk_cache/memory/mem_backend_impl.h"
29 #include "net/disk_cache/simple/simple_backend_impl.h"
30 #include "net/disk_cache/simple/simple_entry_format.h"
31 #include "net/disk_cache/simple/simple_test_util.h"
32 #include "net/disk_cache/simple/simple_util.h"
33 #include "testing/gtest/include/gtest/gtest.h"
35 #if defined(OS_WIN)
36 #include "base/win/scoped_handle.h"
37 #endif
39 // Provide a BackendImpl object to macros from histogram_macros.h.
40 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_
42 using base::Time;
44 namespace {
46 const char kExistingEntryKey[] = "existing entry key";
48 scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
49 const base::Thread& cache_thread,
50 base::FilePath& cache_path) {
51 net::TestCompletionCallback cb;
53 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
54 cache_path, cache_thread.message_loop_proxy(), NULL));
55 int rv = cache->Init(cb.callback());
56 if (cb.GetResult(rv) != net::OK)
57 return scoped_ptr<disk_cache::BackendImpl>();
59 disk_cache::Entry* entry = NULL;
60 rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
61 if (cb.GetResult(rv) != net::OK)
62 return scoped_ptr<disk_cache::BackendImpl>();
63 entry->Close();
65 return cache.Pass();
68 } // namespace
70 // Tests that can run with different types of caches.
71 class DiskCacheBackendTest : public DiskCacheTestWithCache {
72 protected:
73 // Some utility methods:
75 // Perform IO operations on the cache until there is pending IO.
76 int GeneratePendingIO(net::TestCompletionCallback* cb);
78 // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
79 // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
80 // There are 4 entries after doomed_start and 2 after doomed_end.
81 void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
83 bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
84 bool EnumerateAndMatchKeys(int max_to_open,
85 TestIterator* iter,
86 std::set<std::string>* keys_to_match,
87 size_t* count);
89 // Actual tests:
90 void BackendBasics();
91 void BackendKeying();
92 void BackendShutdownWithPendingFileIO(bool fast);
93 void BackendShutdownWithPendingIO(bool fast);
94 void BackendShutdownWithPendingCreate(bool fast);
95 void BackendSetSize();
96 void BackendLoad();
97 void BackendChain();
98 void BackendValidEntry();
99 void BackendInvalidEntry();
100 void BackendInvalidEntryRead();
101 void BackendInvalidEntryWithLoad();
102 void BackendTrimInvalidEntry();
103 void BackendTrimInvalidEntry2();
104 void BackendEnumerations();
105 void BackendEnumerations2();
106 void BackendInvalidEntryEnumeration();
107 void BackendFixEnumerators();
108 void BackendDoomRecent();
109 void BackendDoomBetween();
110 void BackendTransaction(const std::string& name, int num_entries, bool load);
111 void BackendRecoverInsert();
112 void BackendRecoverRemove();
113 void BackendRecoverWithEviction();
114 void BackendInvalidEntry2();
115 void BackendInvalidEntry3();
116 void BackendInvalidEntry7();
117 void BackendInvalidEntry8();
118 void BackendInvalidEntry9(bool eviction);
119 void BackendInvalidEntry10(bool eviction);
120 void BackendInvalidEntry11(bool eviction);
121 void BackendTrimInvalidEntry12();
122 void BackendDoomAll();
123 void BackendDoomAll2();
124 void BackendInvalidRankings();
125 void BackendInvalidRankings2();
126 void BackendDisable();
127 void BackendDisable2();
128 void BackendDisable3();
129 void BackendDisable4();
130 void BackendDisabledAPI();
133 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
134 if (!use_current_thread_) {
135 ADD_FAILURE();
136 return net::ERR_FAILED;
139 disk_cache::Entry* entry;
140 int rv = cache_->CreateEntry("some key", &entry, cb->callback());
141 if (cb->GetResult(rv) != net::OK)
142 return net::ERR_CACHE_CREATE_FAILURE;
144 const int kSize = 25000;
145 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
146 CacheTestFillBuffer(buffer->data(), kSize, false);
148 for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
149 // We are using the current thread as the cache thread because we want to
150 // be able to call directly this method to make sure that the OS (instead
151 // of us switching thread) is returning IO pending.
152 if (!simple_cache_mode_) {
153 rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
154 0, i, buffer.get(), kSize, cb->callback(), false);
155 } else {
156 rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
159 if (rv == net::ERR_IO_PENDING)
160 break;
161 if (rv != kSize)
162 rv = net::ERR_FAILED;
165 // Don't call Close() to avoid going through the queue or we'll deadlock
166 // waiting for the operation to finish.
167 if (!simple_cache_mode_)
168 static_cast<disk_cache::EntryImpl*>(entry)->Release();
169 else
170 entry->Close();
172 return rv;
175 void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
176 base::Time* doomed_end) {
177 InitCache();
179 const int kSize = 50;
180 // This must be greater then MemEntryImpl::kMaxSparseEntrySize.
181 const int kOffset = 10 + 1024 * 1024;
183 disk_cache::Entry* entry0 = NULL;
184 disk_cache::Entry* entry1 = NULL;
185 disk_cache::Entry* entry2 = NULL;
187 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
188 CacheTestFillBuffer(buffer->data(), kSize, false);
190 ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
191 ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
192 ASSERT_EQ(kSize,
193 WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
194 entry0->Close();
196 FlushQueueForTest();
197 AddDelay();
198 if (doomed_start)
199 *doomed_start = base::Time::Now();
201 // Order in rankings list:
202 // first_part1, first_part2, second_part1, second_part2
203 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
204 ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
205 ASSERT_EQ(kSize,
206 WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
207 entry1->Close();
209 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
210 ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
211 ASSERT_EQ(kSize,
212 WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
213 entry2->Close();
215 FlushQueueForTest();
216 AddDelay();
217 if (doomed_end)
218 *doomed_end = base::Time::Now();
220 // Order in rankings list:
221 // third_part1, fourth_part1, third_part2, fourth_part2
222 disk_cache::Entry* entry3 = NULL;
223 disk_cache::Entry* entry4 = NULL;
224 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
225 ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
226 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
227 ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
228 ASSERT_EQ(kSize,
229 WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
230 ASSERT_EQ(kSize,
231 WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
232 entry3->Close();
233 entry4->Close();
235 FlushQueueForTest();
236 AddDelay();
239 // Creates entries based on random keys. Stores these keys in |key_pool|.
240 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
241 std::set<std::string>* key_pool) {
242 const int kNumEntries = 10;
244 for (int i = 0; i < kNumEntries; ++i) {
245 std::string key = GenerateKey(true);
246 disk_cache::Entry* entry;
247 if (CreateEntry(key, &entry) != net::OK)
248 return false;
249 key_pool->insert(key);
250 entry->Close();
252 return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
255 // Performs iteration over the backend and checks that the keys of entries
256 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
257 // will be opened, if it is positive. Otherwise, iteration will continue until
258 // OpenNextEntry stops returning net::OK.
259 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
260 int max_to_open,
261 TestIterator* iter,
262 std::set<std::string>* keys_to_match,
263 size_t* count) {
264 disk_cache::Entry* entry;
266 if (!iter)
267 return false;
268 while (iter->OpenNextEntry(&entry) == net::OK) {
269 if (!entry)
270 return false;
271 EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
272 entry->Close();
273 ++(*count);
274 if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
275 break;
278 return true;
281 void DiskCacheBackendTest::BackendBasics() {
282 InitCache();
283 disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
284 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
285 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
286 ASSERT_TRUE(NULL != entry1);
287 entry1->Close();
288 entry1 = NULL;
290 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
291 ASSERT_TRUE(NULL != entry1);
292 entry1->Close();
293 entry1 = NULL;
295 EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
296 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
297 EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
298 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
299 ASSERT_TRUE(NULL != entry1);
300 ASSERT_TRUE(NULL != entry2);
301 EXPECT_EQ(2, cache_->GetEntryCount());
303 disk_cache::Entry* entry3 = NULL;
304 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
305 ASSERT_TRUE(NULL != entry3);
306 EXPECT_TRUE(entry2 == entry3);
307 EXPECT_EQ(2, cache_->GetEntryCount());
309 EXPECT_EQ(net::OK, DoomEntry("some other key"));
310 EXPECT_EQ(1, cache_->GetEntryCount());
311 entry1->Close();
312 entry2->Close();
313 entry3->Close();
315 EXPECT_EQ(net::OK, DoomEntry("the first key"));
316 EXPECT_EQ(0, cache_->GetEntryCount());
318 ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
319 ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
320 entry1->Doom();
321 entry1->Close();
322 EXPECT_EQ(net::OK, DoomEntry("some other key"));
323 EXPECT_EQ(0, cache_->GetEntryCount());
324 entry2->Close();
327 TEST_F(DiskCacheBackendTest, Basics) {
328 BackendBasics();
331 TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
332 SetNewEviction();
333 BackendBasics();
336 TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
337 SetMemoryOnlyMode();
338 BackendBasics();
341 TEST_F(DiskCacheBackendTest, AppCacheBasics) {
342 SetCacheType(net::APP_CACHE);
343 BackendBasics();
346 TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
347 SetCacheType(net::SHADER_CACHE);
348 BackendBasics();
351 void DiskCacheBackendTest::BackendKeying() {
352 InitCache();
353 const char kName1[] = "the first key";
354 const char kName2[] = "the first Key";
355 disk_cache::Entry *entry1, *entry2;
356 ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
358 ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
359 EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
360 entry2->Close();
362 char buffer[30];
363 base::strlcpy(buffer, kName1, arraysize(buffer));
364 ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
365 EXPECT_TRUE(entry1 == entry2);
366 entry2->Close();
368 base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
369 ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
370 EXPECT_TRUE(entry1 == entry2);
371 entry2->Close();
373 base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
374 ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
375 EXPECT_TRUE(entry1 == entry2);
376 entry2->Close();
378 // Now verify long keys.
379 char buffer2[20000];
380 memset(buffer2, 's', sizeof(buffer2));
381 buffer2[1023] = '\0';
382 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
383 entry2->Close();
385 buffer2[1023] = 'g';
386 buffer2[19999] = '\0';
387 ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
388 entry2->Close();
389 entry1->Close();
392 TEST_F(DiskCacheBackendTest, Keying) {
393 BackendKeying();
396 TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
397 SetNewEviction();
398 BackendKeying();
401 TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
402 SetMemoryOnlyMode();
403 BackendKeying();
406 TEST_F(DiskCacheBackendTest, AppCacheKeying) {
407 SetCacheType(net::APP_CACHE);
408 BackendKeying();
411 TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
412 SetCacheType(net::SHADER_CACHE);
413 BackendKeying();
416 TEST_F(DiskCacheTest, CreateBackend) {
417 net::TestCompletionCallback cb;
420 ASSERT_TRUE(CleanupCacheDir());
421 base::Thread cache_thread("CacheThread");
422 ASSERT_TRUE(cache_thread.StartWithOptions(
423 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
425 // Test the private factory method(s).
426 scoped_ptr<disk_cache::Backend> cache;
427 cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
428 ASSERT_TRUE(cache.get());
429 cache.reset();
431 // Now test the public API.
432 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
433 net::CACHE_BACKEND_DEFAULT,
434 cache_path_,
436 false,
437 cache_thread.task_runner(),
438 NULL,
439 &cache,
440 cb.callback());
441 ASSERT_EQ(net::OK, cb.GetResult(rv));
442 ASSERT_TRUE(cache.get());
443 cache.reset();
445 rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
446 net::CACHE_BACKEND_DEFAULT,
447 base::FilePath(), 0,
448 false, NULL, NULL, &cache,
449 cb.callback());
450 ASSERT_EQ(net::OK, cb.GetResult(rv));
451 ASSERT_TRUE(cache.get());
452 cache.reset();
455 base::MessageLoop::current()->RunUntilIdle();
458 // Tests that |BackendImpl| fails to initialize with a missing file.
459 TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
460 ASSERT_TRUE(CopyTestCache("bad_entry"));
461 base::FilePath filename = cache_path_.AppendASCII("data_1");
462 base::DeleteFile(filename, false);
463 base::Thread cache_thread("CacheThread");
464 ASSERT_TRUE(cache_thread.StartWithOptions(
465 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
466 net::TestCompletionCallback cb;
468 bool prev = base::ThreadRestrictions::SetIOAllowed(false);
469 scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
470 cache_path_, cache_thread.task_runner(), NULL));
471 int rv = cache->Init(cb.callback());
472 EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
473 base::ThreadRestrictions::SetIOAllowed(prev);
475 cache.reset();
476 DisableIntegrityCheck();
479 TEST_F(DiskCacheBackendTest, ExternalFiles) {
480 InitCache();
481 // First, let's create a file on the folder.
482 base::FilePath filename = cache_path_.AppendASCII("f_000001");
484 const int kSize = 50;
485 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
486 CacheTestFillBuffer(buffer1->data(), kSize, false);
487 ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize));
489 // Now let's create a file with the cache.
490 disk_cache::Entry* entry;
491 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
492 ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
493 entry->Close();
495 // And verify that the first file is still there.
496 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
497 ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
498 EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
501 // Tests that we deal with file-level pending operations at destruction time.
502 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
503 ASSERT_TRUE(CleanupCacheDir());
504 uint32 flags = disk_cache::kNoBuffering;
505 if (!fast)
506 flags |= disk_cache::kNoRandom;
508 UseCurrentThread();
509 CreateBackend(flags, NULL);
511 net::TestCompletionCallback cb;
512 int rv = GeneratePendingIO(&cb);
514 // The cache destructor will see one pending operation here.
515 cache_.reset();
517 if (rv == net::ERR_IO_PENDING) {
518 if (fast || simple_cache_mode_)
519 EXPECT_FALSE(cb.have_result());
520 else
521 EXPECT_TRUE(cb.have_result());
524 base::MessageLoop::current()->RunUntilIdle();
526 #if !defined(OS_IOS)
527 // Wait for the actual operation to complete, or we'll keep a file handle that
528 // may cause issues later. Note that on iOS systems even though this test
529 // uses a single thread, the actual IO is posted to a worker thread and the
530 // cache destructor breaks the link to reach cb when the operation completes.
531 rv = cb.GetResult(rv);
532 #endif
535 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
536 BackendShutdownWithPendingFileIO(false);
539 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
540 // builds because they contain a lot of intentional memory leaks.
541 // The wrapper scripts used to run tests under Valgrind Memcheck will also
542 // disable these tests. See:
543 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
544 #if !defined(LEAK_SANITIZER)
545 // We'll be leaking from this test.
546 TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
547 // The integrity test sets kNoRandom so there's a version mismatch if we don't
548 // force new eviction.
549 SetNewEviction();
550 BackendShutdownWithPendingFileIO(true);
552 #endif
554 // See crbug.com/330074
555 #if !defined(OS_IOS)
556 // Tests that one cache instance is not affected by another one going away.
557 TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
558 base::ScopedTempDir store;
559 ASSERT_TRUE(store.CreateUniqueTempDir());
561 net::TestCompletionCallback cb;
562 scoped_ptr<disk_cache::Backend> extra_cache;
563 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
564 net::CACHE_BACKEND_DEFAULT,
565 store.path(),
567 false,
568 base::ThreadTaskRunnerHandle::Get(),
569 NULL,
570 &extra_cache,
571 cb.callback());
572 ASSERT_EQ(net::OK, cb.GetResult(rv));
573 ASSERT_TRUE(extra_cache.get() != NULL);
575 ASSERT_TRUE(CleanupCacheDir());
576 SetNewEviction(); // Match the expected behavior for integrity verification.
577 UseCurrentThread();
579 CreateBackend(disk_cache::kNoBuffering, NULL);
580 rv = GeneratePendingIO(&cb);
582 // cache_ has a pending operation, and extra_cache will go away.
583 extra_cache.reset();
585 if (rv == net::ERR_IO_PENDING)
586 EXPECT_FALSE(cb.have_result());
588 base::MessageLoop::current()->RunUntilIdle();
590 // Wait for the actual operation to complete, or we'll keep a file handle that
591 // may cause issues later.
592 rv = cb.GetResult(rv);
594 #endif
596 // Tests that we deal with background-thread pending operations.
597 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
598 net::TestCompletionCallback cb;
601 ASSERT_TRUE(CleanupCacheDir());
602 base::Thread cache_thread("CacheThread");
603 ASSERT_TRUE(cache_thread.StartWithOptions(
604 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
606 uint32 flags = disk_cache::kNoBuffering;
607 if (!fast)
608 flags |= disk_cache::kNoRandom;
610 CreateBackend(flags, &cache_thread);
612 disk_cache::Entry* entry;
613 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
614 ASSERT_EQ(net::OK, cb.GetResult(rv));
616 entry->Close();
618 // The cache destructor will see one pending operation here.
619 cache_.reset();
622 base::MessageLoop::current()->RunUntilIdle();
625 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
626 BackendShutdownWithPendingIO(false);
629 #if !defined(LEAK_SANITIZER)
630 // We'll be leaking from this test.
631 TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
632 // The integrity test sets kNoRandom so there's a version mismatch if we don't
633 // force new eviction.
634 SetNewEviction();
635 BackendShutdownWithPendingIO(true);
637 #endif
639 // Tests that we deal with create-type pending operations.
640 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
641 net::TestCompletionCallback cb;
644 ASSERT_TRUE(CleanupCacheDir());
645 base::Thread cache_thread("CacheThread");
646 ASSERT_TRUE(cache_thread.StartWithOptions(
647 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
649 disk_cache::BackendFlags flags =
650 fast ? disk_cache::kNone : disk_cache::kNoRandom;
651 CreateBackend(flags, &cache_thread);
653 disk_cache::Entry* entry;
654 int rv = cache_->CreateEntry("some key", &entry, cb.callback());
655 ASSERT_EQ(net::ERR_IO_PENDING, rv);
657 cache_.reset();
658 EXPECT_FALSE(cb.have_result());
661 base::MessageLoop::current()->RunUntilIdle();
664 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
665 BackendShutdownWithPendingCreate(false);
668 #if !defined(LEAK_SANITIZER)
669 // We'll be leaking an entry from this test.
670 TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
671 // The integrity test sets kNoRandom so there's a version mismatch if we don't
672 // force new eviction.
673 SetNewEviction();
674 BackendShutdownWithPendingCreate(true);
676 #endif
678 // Disabled on android since this test requires cache creator to create
679 // blockfile caches.
680 #if !defined(OS_ANDROID)
681 TEST_F(DiskCacheTest, TruncatedIndex) {
682 ASSERT_TRUE(CleanupCacheDir());
683 base::FilePath index = cache_path_.AppendASCII("index");
684 ASSERT_EQ(5, base::WriteFile(index, "hello", 5));
686 base::Thread cache_thread("CacheThread");
687 ASSERT_TRUE(cache_thread.StartWithOptions(
688 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
689 net::TestCompletionCallback cb;
691 scoped_ptr<disk_cache::Backend> backend;
692 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
693 net::CACHE_BACKEND_BLOCKFILE,
694 cache_path_,
696 false,
697 cache_thread.task_runner(),
698 NULL,
699 &backend,
700 cb.callback());
701 ASSERT_NE(net::OK, cb.GetResult(rv));
703 ASSERT_FALSE(backend);
705 #endif
707 void DiskCacheBackendTest::BackendSetSize() {
708 const int cache_size = 0x10000; // 64 kB
709 SetMaxSize(cache_size);
710 InitCache();
712 std::string first("some key");
713 std::string second("something else");
714 disk_cache::Entry* entry;
715 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
717 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
718 memset(buffer->data(), 0, cache_size);
719 EXPECT_EQ(cache_size / 10,
720 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
721 << "normal file";
723 EXPECT_EQ(net::ERR_FAILED,
724 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
725 << "file size above the limit";
727 // By doubling the total size, we make this file cacheable.
728 SetMaxSize(cache_size * 2);
729 EXPECT_EQ(cache_size / 5,
730 WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
732 // Let's fill up the cache!.
733 SetMaxSize(cache_size * 10);
734 EXPECT_EQ(cache_size * 3 / 4,
735 WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
736 entry->Close();
737 FlushQueueForTest();
739 SetMaxSize(cache_size);
741 // The cache is 95% full.
743 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
744 EXPECT_EQ(cache_size / 10,
745 WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
747 disk_cache::Entry* entry2;
748 ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
749 EXPECT_EQ(cache_size / 10,
750 WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
751 entry2->Close(); // This will trigger the cache trim.
753 EXPECT_NE(net::OK, OpenEntry(first, &entry2));
755 FlushQueueForTest(); // Make sure that we are done trimming the cache.
756 FlushQueueForTest(); // We may have posted two tasks to evict stuff.
758 entry->Close();
759 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
760 EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
761 entry->Close();
764 TEST_F(DiskCacheBackendTest, SetSize) {
765 BackendSetSize();
768 TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
769 SetNewEviction();
770 BackendSetSize();
773 TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
774 SetMemoryOnlyMode();
775 BackendSetSize();
778 void DiskCacheBackendTest::BackendLoad() {
779 InitCache();
780 int seed = static_cast<int>(Time::Now().ToInternalValue());
781 srand(seed);
783 disk_cache::Entry* entries[100];
784 for (int i = 0; i < 100; i++) {
785 std::string key = GenerateKey(true);
786 ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
788 EXPECT_EQ(100, cache_->GetEntryCount());
790 for (int i = 0; i < 100; i++) {
791 int source1 = rand() % 100;
792 int source2 = rand() % 100;
793 disk_cache::Entry* temp = entries[source1];
794 entries[source1] = entries[source2];
795 entries[source2] = temp;
798 for (int i = 0; i < 100; i++) {
799 disk_cache::Entry* entry;
800 ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
801 EXPECT_TRUE(entry == entries[i]);
802 entry->Close();
803 entries[i]->Doom();
804 entries[i]->Close();
806 FlushQueueForTest();
807 EXPECT_EQ(0, cache_->GetEntryCount());
810 TEST_F(DiskCacheBackendTest, Load) {
811 // Work with a tiny index table (16 entries)
812 SetMask(0xf);
813 SetMaxSize(0x100000);
814 BackendLoad();
817 TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
818 SetNewEviction();
819 // Work with a tiny index table (16 entries)
820 SetMask(0xf);
821 SetMaxSize(0x100000);
822 BackendLoad();
825 TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
826 SetMaxSize(0x100000);
827 SetMemoryOnlyMode();
828 BackendLoad();
831 TEST_F(DiskCacheBackendTest, AppCacheLoad) {
832 SetCacheType(net::APP_CACHE);
833 // Work with a tiny index table (16 entries)
834 SetMask(0xf);
835 SetMaxSize(0x100000);
836 BackendLoad();
839 TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
840 SetCacheType(net::SHADER_CACHE);
841 // Work with a tiny index table (16 entries)
842 SetMask(0xf);
843 SetMaxSize(0x100000);
844 BackendLoad();
847 // Tests the chaining of an entry to the current head.
848 void DiskCacheBackendTest::BackendChain() {
849 SetMask(0x1); // 2-entry table.
850 SetMaxSize(0x3000); // 12 kB.
851 InitCache();
853 disk_cache::Entry* entry;
854 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
855 entry->Close();
856 ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
857 entry->Close();
860 TEST_F(DiskCacheBackendTest, Chain) {
861 BackendChain();
864 TEST_F(DiskCacheBackendTest, NewEvictionChain) {
865 SetNewEviction();
866 BackendChain();
869 TEST_F(DiskCacheBackendTest, AppCacheChain) {
870 SetCacheType(net::APP_CACHE);
871 BackendChain();
874 TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
875 SetCacheType(net::SHADER_CACHE);
876 BackendChain();
879 TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
880 SetNewEviction();
881 InitCache();
883 disk_cache::Entry* entry;
884 for (int i = 0; i < 100; i++) {
885 std::string name(base::StringPrintf("Key %d", i));
886 ASSERT_EQ(net::OK, CreateEntry(name, &entry));
887 entry->Close();
888 if (i < 90) {
889 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
890 ASSERT_EQ(net::OK, OpenEntry(name, &entry));
891 entry->Close();
895 // The first eviction must come from list 1 (10% limit), the second must come
896 // from list 0.
897 TrimForTest(false);
898 EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
899 TrimForTest(false);
900 EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
902 // Double check that we still have the list tails.
903 ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
904 entry->Close();
905 ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
906 entry->Close();
909 // Before looking for invalid entries, let's check a valid entry.
910 void DiskCacheBackendTest::BackendValidEntry() {
911 InitCache();
913 std::string key("Some key");
914 disk_cache::Entry* entry;
915 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
917 const int kSize = 50;
918 scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
919 memset(buffer1->data(), 0, kSize);
920 base::strlcpy(buffer1->data(), "And the data to save", kSize);
921 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
922 entry->Close();
923 SimulateCrash();
925 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
927 scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
928 memset(buffer2->data(), 0, kSize);
929 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
930 entry->Close();
931 EXPECT_STREQ(buffer1->data(), buffer2->data());
934 TEST_F(DiskCacheBackendTest, ValidEntry) {
935 BackendValidEntry();
938 TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
939 SetNewEviction();
940 BackendValidEntry();
943 // The same logic of the previous test (ValidEntry), but this time force the
944 // entry to be invalid, simulating a crash in the middle.
945 // We'll be leaking memory from this test.
946 void DiskCacheBackendTest::BackendInvalidEntry() {
947 InitCache();
949 std::string key("Some key");
950 disk_cache::Entry* entry;
951 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
953 const int kSize = 50;
954 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
955 memset(buffer->data(), 0, kSize);
956 base::strlcpy(buffer->data(), "And the data to save", kSize);
957 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
958 SimulateCrash();
960 EXPECT_NE(net::OK, OpenEntry(key, &entry));
961 EXPECT_EQ(0, cache_->GetEntryCount());
964 #if !defined(LEAK_SANITIZER)
965 // We'll be leaking memory from this test.
966 TEST_F(DiskCacheBackendTest, InvalidEntry) {
967 BackendInvalidEntry();
970 // We'll be leaking memory from this test.
971 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
972 SetNewEviction();
973 BackendInvalidEntry();
976 // We'll be leaking memory from this test.
977 TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
978 SetCacheType(net::APP_CACHE);
979 BackendInvalidEntry();
982 // We'll be leaking memory from this test.
983 TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
984 SetCacheType(net::SHADER_CACHE);
985 BackendInvalidEntry();
988 // Almost the same test, but this time crash the cache after reading an entry.
989 // We'll be leaking memory from this test.
990 void DiskCacheBackendTest::BackendInvalidEntryRead() {
991 InitCache();
993 std::string key("Some key");
994 disk_cache::Entry* entry;
995 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
997 const int kSize = 50;
998 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
999 memset(buffer->data(), 0, kSize);
1000 base::strlcpy(buffer->data(), "And the data to save", kSize);
1001 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1002 entry->Close();
1003 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1004 EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
1006 SimulateCrash();
1008 if (type_ == net::APP_CACHE) {
1009 // Reading an entry and crashing should not make it dirty.
1010 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1011 EXPECT_EQ(1, cache_->GetEntryCount());
1012 entry->Close();
1013 } else {
1014 EXPECT_NE(net::OK, OpenEntry(key, &entry));
1015 EXPECT_EQ(0, cache_->GetEntryCount());
// Instantiations of BackendInvalidEntryRead for each backend flavor.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  SetNewEviction();
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();
}
// Crashes with many entries outstanding and verifies that only the entries
// open at crash time are discarded: open entries become dirty and are dropped,
// closed entries survive.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries)
  SetMask(0xf);
  SetMaxSize(0x100000);
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Shuffle so the open/closed split below is random with respect to
  // creation (and hash-bucket) order.
  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  // Close the first half; the second half stays open across the crash.
  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  SimulateCrash();

  // Entries that were open at crash time must be gone.
  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  // Entries closed before the crash must still be readable.
  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
    entry->Close();
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
}
// Instantiations of BackendInvalidEntryWithLoad for each backend flavor.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  SetNewEviction();
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
}
1114 // We'll be leaking memory from this test.
1115 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1116 const int kSize = 0x3000; // 12 kB
1117 SetMaxSize(kSize * 10);
1118 InitCache();
1120 std::string first("some key");
1121 std::string second("something else");
1122 disk_cache::Entry* entry;
1123 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
1125 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1126 memset(buffer->data(), 0, kSize);
1127 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1129 // Simulate a crash.
1130 SimulateCrash();
1132 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
1133 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1135 EXPECT_EQ(2, cache_->GetEntryCount());
1136 SetMaxSize(kSize);
1137 entry->Close(); // Trim the cache.
1138 FlushQueueForTest();
1140 // If we evicted the entry in less than 20mS, we have one entry in the cache;
1141 // if it took more than that, we posted a task and we'll delete the second
1142 // entry too.
1143 base::MessageLoop::current()->RunUntilIdle();
1145 // This may be not thread-safe in general, but for now it's OK so add some
1146 // ThreadSanitizer annotations to ignore data races on cache_.
1147 // See http://crbug.com/55970
1148 ANNOTATE_IGNORE_READS_BEGIN();
1149 EXPECT_GE(1, cache_->GetEntryCount());
1150 ANNOTATE_IGNORE_READS_END();
1152 EXPECT_NE(net::OK, OpenEntry(first, &entry));
// Instantiations of BackendTrimInvalidEntry.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
  BackendTrimInvalidEntry();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
  SetNewEviction();
  BackendTrimInvalidEntry();
}
1166 // We'll be leaking memory from this test.
1167 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1168 SetMask(0xf); // 16-entry table.
1170 const int kSize = 0x3000; // 12 kB
1171 SetMaxSize(kSize * 40);
1172 InitCache();
1174 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1175 memset(buffer->data(), 0, kSize);
1176 disk_cache::Entry* entry;
1178 // Writing 32 entries to this cache chains most of them.
1179 for (int i = 0; i < 32; i++) {
1180 std::string key(base::StringPrintf("some key %d", i));
1181 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1182 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1183 entry->Close();
1184 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1185 // Note that we are not closing the entries.
1188 // Simulate a crash.
1189 SimulateCrash();
1191 ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
1192 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1194 FlushQueueForTest();
1195 EXPECT_EQ(33, cache_->GetEntryCount());
1196 SetMaxSize(kSize);
1198 // For the new eviction code, all corrupt entries are on the second list so
1199 // they are not going away that easy.
1200 if (new_eviction_) {
1201 EXPECT_EQ(net::OK, DoomAllEntries());
1204 entry->Close(); // Trim the cache.
1205 FlushQueueForTest();
1207 // We may abort the eviction before cleaning up everything.
1208 base::MessageLoop::current()->RunUntilIdle();
1209 FlushQueueForTest();
1210 // If it's not clear enough: we may still have eviction tasks running at this
1211 // time, so the number of entries is changing while we read it.
1212 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1213 EXPECT_GE(30, cache_->GetEntryCount());
1214 ANNOTATE_IGNORE_READS_AND_WRITES_END();
// Instantiations of BackendTrimInvalidEntry2.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
  BackendTrimInvalidEntry2();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
  SetNewEviction();
  BackendTrimInvalidEntry2();
}
#endif  // !defined(LEAK_SANITIZER)
// Creates kNumEntries entries, then enumerates the cache twice and checks
// that (a) every entry is visited, (b) timestamps fall inside the creation
// window, and (c) enumeration itself does not touch the timestamps.
void DiskCacheBackendTest::BackendEnumerations() {
  InitCache();
  Time initial = Time::Now();

  const int kNumEntries = 100;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
  Time final = Time::Now();

  disk_cache::Entry* entry;
  scoped_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  Time last_modified[kNumEntries];
  Time last_used[kNumEntries];
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      // Record timestamps so the second pass can verify they are unchanged.
      last_modified[count] = entry->GetLastModified();
      last_used[count] = entry->GetLastUsed();
      EXPECT_TRUE(initial <= last_modified[count]);
      EXPECT_TRUE(final >= last_modified[count]);
    }

    entry->Close();
    count++;
  }
  EXPECT_EQ(kNumEntries, count);

  iter = CreateIterator();
  count = 0;
  // The previous enumeration should not have changed the timestamps.
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
      EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
    }
    entry->Close();
    count++;
  }
  EXPECT_EQ(kNumEntries, count);
}
// Instantiations of BackendEnumerations for each backend flavor.
TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  SetNewEviction();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
}
// Verifies enumerations while entries are open.
void DiskCacheBackendTest::BackendEnumerations2() {
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  entry2->Close();
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();
  // Opening |second| moves it to the front of the recency list, so the
  // iterator should return it first.
  ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
  scoped_ptr<TestIterator> iter = CreateIterator();
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(entry2->GetKey(), second);

  // Two entries and the iterator pointing at "first".
  entry1->Close();
  entry2->Close();

  // The iterator should still be valid, so we should not crash.
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(entry2->GetKey(), first);
  entry2->Close();
  iter = CreateIterator();

  // Modify the oldest entry and get the newest element.
  ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
  EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
  if (type_ == net::APP_CACHE) {
    // The list is not updated.
    EXPECT_EQ(entry2->GetKey(), second);
  } else {
    EXPECT_EQ(entry2->GetKey(), first);
  }

  entry1->Close();
  entry2->Close();
}
// Instantiations of BackendEnumerations2 for each backend flavor.
TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  SetNewEviction();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
  SetMemoryOnlyMode();
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();
}

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
}
// Verify that ReadData calls do not update the LRU cache
// when using the SHADER_CACHE type.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));

  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  entry2->Close();

  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  AddDelay();

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
  entry1->Close();

  // |second| must still be first in enumeration order: the read above did not
  // promote |first| in the LRU.
  scoped_ptr<TestIterator> iter = CreateIterator();
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
  EXPECT_EQ(entry2->GetKey(), second);
  entry2->Close();
}
#if !defined(LEAK_SANITIZER)
// Verify handling of invalid entries while doing enumerations.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
  InitCache();

  std::string key("Some key");
  disk_cache::Entry *entry, *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
  entry1->Close();
  // Leave |entry1| open (and thus dirty) when the crash happens.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));

  std::string key2("Another key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
  entry2->Close();
  ASSERT_EQ(2, cache_->GetEntryCount());

  SimulateCrash();

  // Only the clean entry (|key2|) should show up during enumeration.
  scoped_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    EXPECT_EQ(key2, entry->GetKey());
    entry->Close();
    count++;
  }
  EXPECT_EQ(1, count);
  EXPECT_EQ(1, cache_->GetEntryCount());
}
// Instantiations of BackendInvalidEntryEnumeration.
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();
}

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  SetNewEviction();
  BackendInvalidEntryEnumeration();
}
#endif  // !defined(LEAK_SANITIZER)
// Tests that if for some reason entries are modified close to existing cache
// iterators, we don't generate fatal errors or reset the cache.
void DiskCacheBackendTest::BackendFixEnumerators() {
  InitCache();

  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  // Advance two independent iterators in lockstep.
  disk_cache::Entry *entry1, *entry2;
  scoped_ptr<TestIterator> iter1 = CreateIterator(), iter2 = CreateIterator();
  ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
  ASSERT_TRUE(NULL != entry1);
  entry1->Close();
  entry1 = NULL;

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    if (entry1)
      entry1->Close();
    ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
    ASSERT_TRUE(NULL != entry1);

    ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
    ASSERT_TRUE(NULL != entry2);
    entry2->Close();
  }

  // Messing up with entry1 will modify entry2->next.
  entry1->Doom();
  ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
  ASSERT_TRUE(NULL != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
  entry1->Close();
  entry2->Close();

  // And the second iterator should keep working.
  ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
  ASSERT_TRUE(NULL != entry2);
  entry2->Close();
}
// Instantiations of BackendFixEnumerators.
TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();
}

TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  SetNewEviction();
  BackendFixEnumerators();
}
// Creates two batches of entries separated by a delay and verifies that
// DoomEntriesSince() removes only the entries created after the cutoff.
void DiskCacheBackendTest::BackendDoomRecent() {
  InitCache();

  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  entry->Close();
  FlushQueueForTest();

  // Make the two batches distinguishable by timestamp.
  AddDelay();
  Time middle = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  // Nothing was created after |final|, so nothing should be doomed.
  EXPECT_EQ(net::OK, DoomEntriesSince(final));
  ASSERT_EQ(4, cache_->GetEntryCount());

  // Dooming since |middle| removes "third" and "fourth" only.
  EXPECT_EQ(net::OK, DoomEntriesSince(middle));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("second", &entry));
  entry->Close();
}
// Instantiations of BackendDoomRecent, plus DoomEntriesSince/DoomAllEntries
// tests over sparse-entry caches.
TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  SetNewEviction();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  base::Time start;
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. Thats why expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
}
// Creates entries in three distinct time windows and verifies that
// DoomEntriesBetween() removes exactly the entries whose timestamps fall
// inside the [start, end) window.
void DiskCacheBackendTest::BackendDoomBetween() {
  InitCache();

  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_start = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time middle_end = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  entry->Close();
  // Re-open so "fourth" has a last-used time after |middle_end|.
  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  entry->Close();
  FlushQueueForTest();

  AddDelay();
  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  // Only "second" and "third" fall inside [middle_start, middle_end).
  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  entry->Close();

  // Widening the window to |final| also takes out "fourth".
  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
  ASSERT_EQ(1, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("first", &entry));
  entry->Close();
}
// Instantiations of BackendDoomBetween, plus DoomEntriesBetween tests over
// sparse-entry caches.
TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  SetNewEviction();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  // The blockfile backend counts sparse child entries, hence the larger
  // expected counts compared with the memory-only variant above.
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  start = end;
  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
}
// Restores a golden cache image |name| (captured mid-transaction), opens it,
// and verifies the backend rolled the interrupted operation back or forward:
// the "first key" entry must be gone and the remaining count must match
// |num_entries| (or |num_entries| - 1 under heavy load, see below).
// Sets |success_| so callers can ASSERT on the outcome per image.
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries, bool load) {
  success_ = false;
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  uint32 mask;
  if (load) {
    mask = 0xf;
    SetMaxSize(0x100000);
  } else {
    // Clear the settings from the previous run.
    mask = 0;
    SetMaxSize(0);
  }
  SetMask(mask);

  InitCache();
  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    ASSERT_TRUE(load);
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  // Release the backend before inspecting the files on disk.
  cache_.reset();
  cache_impl_ = NULL;

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
  success_ = true;
}
// Runs BackendTransaction over golden images captured while an insert was in
// flight, for empty, one-entry, and heavily loaded caches.
void DiskCacheBackendTest::BackendRecoverInsert() {
  // Tests with an empty cache.
  BackendTransaction("insert_empty1", 0, false);
  ASSERT_TRUE(success_) << "insert_empty1";
  BackendTransaction("insert_empty2", 0, false);
  ASSERT_TRUE(success_) << "insert_empty2";
  BackendTransaction("insert_empty3", 0, false);
  ASSERT_TRUE(success_) << "insert_empty3";

  // Tests with one entry on the cache.
  BackendTransaction("insert_one1", 1, false);
  ASSERT_TRUE(success_) << "insert_one1";
  BackendTransaction("insert_one2", 1, false);
  ASSERT_TRUE(success_) << "insert_one2";
  BackendTransaction("insert_one3", 1, false);
  ASSERT_TRUE(success_) << "insert_one3";

  // Tests with one hundred entries on the cache, tiny index.
  BackendTransaction("insert_load1", 100, true);
  ASSERT_TRUE(success_) << "insert_load1";
  BackendTransaction("insert_load2", 100, true);
  ASSERT_TRUE(success_) << "insert_load2";
}

TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  SetNewEviction();
  BackendRecoverInsert();
}
// Runs BackendTransaction over golden images captured while a removal was in
// flight: removing the only element, the list head, the list tail, and
// removals under heavy load.
void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";
}
// Instantiations of BackendRecoverRemove; disabled on Windows, see bug below.
#if defined(OS_WIN)
// http://crbug.com/396392
#define MAYBE_RecoverRemove DISABLED_RecoverRemove
#else
#define MAYBE_RecoverRemove RecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
  BackendRecoverRemove();
}

#if defined(OS_WIN)
// http://crbug.com/396392
#define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
#else
#define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
#endif
TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
  SetNewEviction();
  BackendRecoverRemove();
}
// Opens a mid-transaction golden cache with a max size small enough to force
// eviction during recovery; only checks that initialization does not crash.
void DiskCacheBackendTest::BackendRecoverWithEviction() {
  success_ = false;
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  SetMask(0xf);
  SetMaxSize(0x1000);

  // We should not crash here.
  InitCache();
  // Recovery plus eviction may legitimately leave the files inconsistent.
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();
}

TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  SetNewEviction();
  BackendRecoverWithEviction();
}
// Tests that the |BackendImpl| fails to start with the wrong cache version.
TEST_F(DiskCacheTest, WrongVersion) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  // Init() must report failure rather than silently accepting the old format.
  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path_, cache_thread.task_runner(), NULL));
  int rv = cache->Init(cb.callback());
  ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
}
// Entropy provider that always returns 0.5, pinning every field trial to a
// deterministic group so the SimpleCacheTrial tests below are reproducible.
class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
 public:
  ~BadEntropyProvider() override {}

  double GetEntropyForTrial(const std::string& trial_name,
                            uint32 randomization_seed) const override {
    return 0.5;
  }
};
// Tests that the disk cache successfully joins the control group, dropping the
// existing cache in favour of a new empty cache.
// Disabled on android since this test requires cache creator to create
// blockfile caches.
#if !defined(OS_ANDROID)
TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  // Seed the cache directory with one existing entry.
  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());
  cache.reset();

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");
  net::TestCompletionCallback cb;
  scoped_ptr<disk_cache::Backend> base_cache;
  int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                          net::CACHE_BACKEND_BLOCKFILE,
                                          cache_path_,
                                          0,
                                          true,
                                          cache_thread.task_runner(),
                                          NULL,
                                          &base_cache,
                                          cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  // Joining the control group must have wiped the pre-existing entry.
  EXPECT_EQ(0, base_cache->GetEntryCount());
}
#endif
// Tests that the disk cache can restart in the control group preserving
// existing entries.
TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());

  net::TestCompletionCallback cb;

  // Repeatedly re-open the backend; the seeded entry must survive each time.
  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    cache.reset(new disk_cache::BackendImpl(
        cache_path_, cache_thread.message_loop_proxy(), NULL));
    int rv = cache->Init(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_EQ(1, cache->GetEntryCount());

    disk_cache::Entry* entry = NULL;
    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_TRUE(entry);
    entry->Close();
  }
}
// Tests that the disk cache can leave the control group preserving existing
// entries.
TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  {
    // Instantiate the SimpleCacheTrial, forcing this run into the
    // ExperimentControl group. Scoped so the trial can be re-created below.
    base::FieldTrialList field_trial_list(new BadEntropyProvider());
    base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                           "ExperimentControl");

    scoped_ptr<disk_cache::BackendImpl> cache =
        CreateExistingEntryCache(cache_thread, cache_path_);
    ASSERT_TRUE(cache.get());
  }

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentNo group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
  net::TestCompletionCallback cb;

  // Repeatedly re-open the backend; the seeded entry must survive leaving the
  // control group.
  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
        cache_path_, cache_thread.message_loop_proxy(), NULL));
    int rv = cache->Init(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_EQ(1, cache->GetEntryCount());

    disk_cache::Entry* entry = NULL;
    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_TRUE(entry);
    entry->Close();
  }
}
// Tests that the cache is properly restarted on recovery error.
// Disabled on android since this test requires cache creator to create
// blockfile caches.
#if !defined(OS_ANDROID)
TEST_F(DiskCacheBackendTest, DeleteOld) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  SetNewEviction();
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  net::TestCompletionCallback cb;
  // Disallow I/O on this thread to prove the old cache is deleted and rebuilt
  // entirely on the cache thread.
  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  base::FilePath path(cache_path_);
  int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                          net::CACHE_BACKEND_BLOCKFILE,
                                          path,
                                          0,
                                          true,
                                          cache_thread.task_runner(),
                                          NULL,
                                          &cache_,
                                          cb.callback());
  path.clear();  // Make sure path was captured by the previous call.
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);
  cache_.reset();
  EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
}
#endif
// We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidEntry2() {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  // The valid entry opens; the corrupt one must be rejected without crashing.
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  entry1->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry2) {
  BackendInvalidEntry2();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
  SetNewEviction();
  BackendInvalidEntry2();
}
// Tests that we don't crash or hang when enumerating this cache.
void DiskCacheBackendTest::BackendInvalidEntry3() {
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // Walking the whole cache must terminate despite the corrupt entries.
  disk_cache::Entry* entry;
  scoped_ptr<TestIterator> iter = CreateIterator();
  while (iter->OpenNextEntry(&entry) == net::OK) {
    entry->Close();
  }
}

TEST_F(DiskCacheBackendTest, InvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  BackendInvalidEntry3();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  BackendInvalidEntry3();
  DisableIntegrityCheck();
}
// Test that we handle a dirty entry on the LRU list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  TrimForTest(false);
}

// Test that we handle a dirty entry on the deleted list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  TrimDeletedListForTest(false);
}

TEST_F(DiskCacheBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}
2086 // Tests that we don't hang when there is a loop on the hash collision list.
2087 // The test cache could be a result of bug 69135.
2088 TEST_F(DiskCacheBackendTest, BadNextEntry1) {
2089 ASSERT_TRUE(CopyTestCache("list_loop2"));
2090 SetMask(0x1); // 2-entry table.
2091 SetMaxSize(0x3000); // 12 kB.
2092 DisableFirstCleanup();
2093 InitCache();
2095 // The second entry points at itselft, and the first entry is not accessible
2096 // though the index, but it is at the head of the LRU.
2098 disk_cache::Entry* entry;
2099 ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
2100 entry->Close();
2102 TrimForTest(false);
2103 TrimForTest(false);
2104 ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
2105 entry->Close();
2106 EXPECT_EQ(1, cache_->GetEntryCount());
2109 // Tests that we don't hang when there is a loop on the hash collision list.
2110 // The test cache could be a result of bug 69135.
2111 TEST_F(DiskCacheBackendTest, BadNextEntry2) {
2112 ASSERT_TRUE(CopyTestCache("list_loop3"));
2113 SetMask(0x1); // 2-entry table.
2114 SetMaxSize(0x3000); // 12 kB.
2115 DisableFirstCleanup();
2116 InitCache();
2118 // There is a wide loop of 5 entries.
2120 disk_cache::Entry* entry;
2121 ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
2124 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2125 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2126 DisableFirstCleanup();
2127 SetNewEviction();
2128 InitCache();
2130 // The second entry is dirty, but removing it should not corrupt the list.
2131 disk_cache::Entry* entry;
2132 ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2133 ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2135 // This should not delete the cache.
2136 entry->Doom();
2137 FlushQueueForTest();
2138 entry->Close();
2140 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2141 entry->Close();
2144 // Tests handling of corrupt entries by keeping the rankings node around, with
2145 // a fatal failure.
2146 void DiskCacheBackendTest::BackendInvalidEntry7() {
2147 const int kSize = 0x3000; // 12 kB.
2148 SetMaxSize(kSize * 10);
2149 InitCache();
2151 std::string first("some key");
2152 std::string second("something else");
2153 disk_cache::Entry* entry;
2154 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2155 entry->Close();
2156 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2158 // Corrupt this entry.
2159 disk_cache::EntryImpl* entry_impl =
2160 static_cast<disk_cache::EntryImpl*>(entry);
2162 entry_impl->rankings()->Data()->next = 0;
2163 entry_impl->rankings()->Store();
2164 entry->Close();
2165 FlushQueueForTest();
2166 EXPECT_EQ(2, cache_->GetEntryCount());
2168 // This should detect the bad entry.
2169 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2170 EXPECT_EQ(1, cache_->GetEntryCount());
2172 // We should delete the cache. The list still has a corrupt node.
2173 scoped_ptr<TestIterator> iter = CreateIterator();
2174 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2175 FlushQueueForTest();
2176 EXPECT_EQ(0, cache_->GetEntryCount());
2179 TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2180 BackendInvalidEntry7();
2183 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2184 SetNewEviction();
2185 BackendInvalidEntry7();
2188 // Tests handling of corrupt entries by keeping the rankings node around, with
2189 // a non fatal failure.
2190 void DiskCacheBackendTest::BackendInvalidEntry8() {
2191 const int kSize = 0x3000; // 12 kB
2192 SetMaxSize(kSize * 10);
2193 InitCache();
2195 std::string first("some key");
2196 std::string second("something else");
2197 disk_cache::Entry* entry;
2198 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2199 entry->Close();
2200 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2202 // Corrupt this entry.
2203 disk_cache::EntryImpl* entry_impl =
2204 static_cast<disk_cache::EntryImpl*>(entry);
2206 entry_impl->rankings()->Data()->contents = 0;
2207 entry_impl->rankings()->Store();
2208 entry->Close();
2209 FlushQueueForTest();
2210 EXPECT_EQ(2, cache_->GetEntryCount());
2212 // This should detect the bad entry.
2213 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2214 EXPECT_EQ(1, cache_->GetEntryCount());
2216 // We should not delete the cache.
2217 scoped_ptr<TestIterator> iter = CreateIterator();
2218 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2219 entry->Close();
2220 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2221 EXPECT_EQ(1, cache_->GetEntryCount());
2224 TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2225 BackendInvalidEntry8();
2228 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2229 SetNewEviction();
2230 BackendInvalidEntry8();
2233 // Tests handling of corrupt entries detected by enumerations. Note that these
2234 // tests (xx9 to xx11) are basically just going though slightly different
2235 // codepaths so they are tighlty coupled with the code, but that is better than
2236 // not testing error handling code.
2237 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2238 const int kSize = 0x3000; // 12 kB.
2239 SetMaxSize(kSize * 10);
2240 InitCache();
2242 std::string first("some key");
2243 std::string second("something else");
2244 disk_cache::Entry* entry;
2245 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2246 entry->Close();
2247 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2249 // Corrupt this entry.
2250 disk_cache::EntryImpl* entry_impl =
2251 static_cast<disk_cache::EntryImpl*>(entry);
2253 entry_impl->entry()->Data()->state = 0xbad;
2254 entry_impl->entry()->Store();
2255 entry->Close();
2256 FlushQueueForTest();
2257 EXPECT_EQ(2, cache_->GetEntryCount());
2259 if (eviction) {
2260 TrimForTest(false);
2261 EXPECT_EQ(1, cache_->GetEntryCount());
2262 TrimForTest(false);
2263 EXPECT_EQ(1, cache_->GetEntryCount());
2264 } else {
2265 // We should detect the problem through the list, but we should not delete
2266 // the entry, just fail the iteration.
2267 scoped_ptr<TestIterator> iter = CreateIterator();
2268 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2270 // Now a full iteration will work, and return one entry.
2271 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2272 entry->Close();
2273 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2275 // This should detect what's left of the bad entry.
2276 EXPECT_NE(net::OK, OpenEntry(second, &entry));
2277 EXPECT_EQ(2, cache_->GetEntryCount());
2279 DisableIntegrityCheck();
2282 TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2283 BackendInvalidEntry9(false);
2286 TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2287 SetNewEviction();
2288 BackendInvalidEntry9(false);
2291 TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2292 BackendInvalidEntry9(true);
2295 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2296 SetNewEviction();
2297 BackendInvalidEntry9(true);
2300 // Tests handling of corrupt entries detected by enumerations.
2301 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2302 const int kSize = 0x3000; // 12 kB.
2303 SetMaxSize(kSize * 10);
2304 SetNewEviction();
2305 InitCache();
2307 std::string first("some key");
2308 std::string second("something else");
2309 disk_cache::Entry* entry;
2310 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2311 entry->Close();
2312 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2313 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2314 entry->Close();
2315 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2317 // Corrupt this entry.
2318 disk_cache::EntryImpl* entry_impl =
2319 static_cast<disk_cache::EntryImpl*>(entry);
2321 entry_impl->entry()->Data()->state = 0xbad;
2322 entry_impl->entry()->Store();
2323 entry->Close();
2324 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2325 entry->Close();
2326 EXPECT_EQ(3, cache_->GetEntryCount());
2328 // We have:
2329 // List 0: third -> second (bad).
2330 // List 1: first.
2332 if (eviction) {
2333 // Detection order: second -> first -> third.
2334 TrimForTest(false);
2335 EXPECT_EQ(3, cache_->GetEntryCount());
2336 TrimForTest(false);
2337 EXPECT_EQ(2, cache_->GetEntryCount());
2338 TrimForTest(false);
2339 EXPECT_EQ(1, cache_->GetEntryCount());
2340 } else {
2341 // Detection order: third -> second -> first.
2342 // We should detect the problem through the list, but we should not delete
2343 // the entry.
2344 scoped_ptr<TestIterator> iter = CreateIterator();
2345 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2346 entry->Close();
2347 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2348 EXPECT_EQ(first, entry->GetKey());
2349 entry->Close();
2350 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2352 DisableIntegrityCheck();
2355 TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2356 BackendInvalidEntry10(false);
2359 TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2360 BackendInvalidEntry10(true);
2363 // Tests handling of corrupt entries detected by enumerations.
2364 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2365 const int kSize = 0x3000; // 12 kB.
2366 SetMaxSize(kSize * 10);
2367 SetNewEviction();
2368 InitCache();
2370 std::string first("some key");
2371 std::string second("something else");
2372 disk_cache::Entry* entry;
2373 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2374 entry->Close();
2375 ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2376 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2377 entry->Close();
2378 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2379 entry->Close();
2380 ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2381 EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2383 // Corrupt this entry.
2384 disk_cache::EntryImpl* entry_impl =
2385 static_cast<disk_cache::EntryImpl*>(entry);
2387 entry_impl->entry()->Data()->state = 0xbad;
2388 entry_impl->entry()->Store();
2389 entry->Close();
2390 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2391 entry->Close();
2392 FlushQueueForTest();
2393 EXPECT_EQ(3, cache_->GetEntryCount());
2395 // We have:
2396 // List 0: third.
2397 // List 1: second (bad) -> first.
2399 if (eviction) {
2400 // Detection order: third -> first -> second.
2401 TrimForTest(false);
2402 EXPECT_EQ(2, cache_->GetEntryCount());
2403 TrimForTest(false);
2404 EXPECT_EQ(1, cache_->GetEntryCount());
2405 TrimForTest(false);
2406 EXPECT_EQ(1, cache_->GetEntryCount());
2407 } else {
2408 // Detection order: third -> second.
2409 // We should detect the problem through the list, but we should not delete
2410 // the entry, just fail the iteration.
2411 scoped_ptr<TestIterator> iter = CreateIterator();
2412 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2413 entry->Close();
2414 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2416 // Now a full iteration will work, and return two entries.
2417 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2418 entry->Close();
2419 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2420 entry->Close();
2421 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2423 DisableIntegrityCheck();
2426 TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2427 BackendInvalidEntry11(false);
2430 TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2431 BackendInvalidEntry11(true);
2434 // Tests handling of corrupt entries in the middle of a long eviction run.
2435 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2436 const int kSize = 0x3000; // 12 kB
2437 SetMaxSize(kSize * 10);
2438 InitCache();
2440 std::string first("some key");
2441 std::string second("something else");
2442 disk_cache::Entry* entry;
2443 ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2444 entry->Close();
2445 ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2447 // Corrupt this entry.
2448 disk_cache::EntryImpl* entry_impl =
2449 static_cast<disk_cache::EntryImpl*>(entry);
2451 entry_impl->entry()->Data()->state = 0xbad;
2452 entry_impl->entry()->Store();
2453 entry->Close();
2454 ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2455 entry->Close();
2456 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2457 TrimForTest(true);
2458 EXPECT_EQ(1, cache_->GetEntryCount());
2459 entry->Close();
2460 DisableIntegrityCheck();
2463 TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2464 BackendTrimInvalidEntry12();
2467 TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2468 SetNewEviction();
2469 BackendTrimInvalidEntry12();
2472 // We want to be able to deal with messed up entries on disk.
2473 void DiskCacheBackendTest::BackendInvalidRankings2() {
2474 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2475 DisableFirstCleanup();
2476 InitCache();
2478 disk_cache::Entry *entry1, *entry2;
2479 EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2480 ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2481 entry2->Close();
2483 // CheckCacheIntegrity will fail at this point.
2484 DisableIntegrityCheck();
2487 TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2488 BackendInvalidRankings2();
2491 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2492 SetNewEviction();
2493 BackendInvalidRankings2();
2496 // If the LRU is corrupt, we delete the cache.
2497 void DiskCacheBackendTest::BackendInvalidRankings() {
2498 disk_cache::Entry* entry;
2499 scoped_ptr<TestIterator> iter = CreateIterator();
2500 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2501 entry->Close();
2502 EXPECT_EQ(2, cache_->GetEntryCount());
2504 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2505 FlushQueueForTest(); // Allow the restart to finish.
2506 EXPECT_EQ(0, cache_->GetEntryCount());
2509 TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2510 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2511 DisableFirstCleanup();
2512 InitCache();
2513 BackendInvalidRankings();
2516 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2517 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2518 DisableFirstCleanup();
2519 SetNewEviction();
2520 InitCache();
2521 BackendInvalidRankings();
2524 TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2525 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2526 DisableFirstCleanup();
2527 InitCache();
2528 SetTestMode(); // Fail cache reinitialization.
2529 BackendInvalidRankings();
2532 TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2533 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2534 DisableFirstCleanup();
2535 SetNewEviction();
2536 InitCache();
2537 SetTestMode(); // Fail cache reinitialization.
2538 BackendInvalidRankings();
2541 // If the LRU is corrupt and we have open entries, we disable the cache.
2542 void DiskCacheBackendTest::BackendDisable() {
2543 disk_cache::Entry *entry1, *entry2;
2544 scoped_ptr<TestIterator> iter = CreateIterator();
2545 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2547 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2548 EXPECT_EQ(0, cache_->GetEntryCount());
2549 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2551 entry1->Close();
2552 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2553 FlushQueueForTest(); // This one actually allows that task to complete.
2555 EXPECT_EQ(0, cache_->GetEntryCount());
2558 TEST_F(DiskCacheBackendTest, DisableSuccess) {
2559 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2560 DisableFirstCleanup();
2561 InitCache();
2562 BackendDisable();
2565 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2566 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2567 DisableFirstCleanup();
2568 SetNewEviction();
2569 InitCache();
2570 BackendDisable();
2573 TEST_F(DiskCacheBackendTest, DisableFailure) {
2574 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2575 DisableFirstCleanup();
2576 InitCache();
2577 SetTestMode(); // Fail cache reinitialization.
2578 BackendDisable();
2581 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2582 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2583 DisableFirstCleanup();
2584 SetNewEviction();
2585 InitCache();
2586 SetTestMode(); // Fail cache reinitialization.
2587 BackendDisable();
2590 // This is another type of corruption on the LRU; disable the cache.
2591 void DiskCacheBackendTest::BackendDisable2() {
2592 EXPECT_EQ(8, cache_->GetEntryCount());
2594 disk_cache::Entry* entry;
2595 scoped_ptr<TestIterator> iter = CreateIterator();
2596 int count = 0;
2597 while (iter->OpenNextEntry(&entry) == net::OK) {
2598 ASSERT_TRUE(NULL != entry);
2599 entry->Close();
2600 count++;
2601 ASSERT_LT(count, 9);
2604 FlushQueueForTest();
2605 EXPECT_EQ(0, cache_->GetEntryCount());
2608 TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2609 ASSERT_TRUE(CopyTestCache("list_loop"));
2610 DisableFirstCleanup();
2611 InitCache();
2612 BackendDisable2();
2615 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2616 ASSERT_TRUE(CopyTestCache("list_loop"));
2617 DisableFirstCleanup();
2618 SetNewEviction();
2619 InitCache();
2620 BackendDisable2();
2623 TEST_F(DiskCacheBackendTest, DisableFailure2) {
2624 ASSERT_TRUE(CopyTestCache("list_loop"));
2625 DisableFirstCleanup();
2626 InitCache();
2627 SetTestMode(); // Fail cache reinitialization.
2628 BackendDisable2();
2631 TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2632 ASSERT_TRUE(CopyTestCache("list_loop"));
2633 DisableFirstCleanup();
2634 SetNewEviction();
2635 InitCache();
2636 SetTestMode(); // Fail cache reinitialization.
2637 BackendDisable2();
2640 // If the index size changes when we disable the cache, we should not crash.
2641 void DiskCacheBackendTest::BackendDisable3() {
2642 disk_cache::Entry *entry1, *entry2;
2643 scoped_ptr<TestIterator> iter = CreateIterator();
2644 EXPECT_EQ(2, cache_->GetEntryCount());
2645 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2646 entry1->Close();
2648 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2649 FlushQueueForTest();
2651 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2652 entry2->Close();
2654 EXPECT_EQ(1, cache_->GetEntryCount());
2657 TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2658 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2659 DisableFirstCleanup();
2660 SetMaxSize(20 * 1024 * 1024);
2661 InitCache();
2662 BackendDisable3();
2665 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2666 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2667 DisableFirstCleanup();
2668 SetMaxSize(20 * 1024 * 1024);
2669 SetNewEviction();
2670 InitCache();
2671 BackendDisable3();
2674 // If we disable the cache, already open entries should work as far as possible.
2675 void DiskCacheBackendTest::BackendDisable4() {
2676 disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2677 scoped_ptr<TestIterator> iter = CreateIterator();
2678 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2680 char key2[2000];
2681 char key3[20000];
2682 CacheTestFillBuffer(key2, sizeof(key2), true);
2683 CacheTestFillBuffer(key3, sizeof(key3), true);
2684 key2[sizeof(key2) - 1] = '\0';
2685 key3[sizeof(key3) - 1] = '\0';
2686 ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2687 ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2689 const int kBufSize = 20000;
2690 scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2691 memset(buf->data(), 0, kBufSize);
2692 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2693 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2695 // This line should disable the cache but not delete it.
2696 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4));
2697 EXPECT_EQ(0, cache_->GetEntryCount());
2699 EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
2701 EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2702 EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2703 EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2705 EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2706 EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2707 EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2709 std::string key = entry2->GetKey();
2710 EXPECT_EQ(sizeof(key2) - 1, key.size());
2711 key = entry3->GetKey();
2712 EXPECT_EQ(sizeof(key3) - 1, key.size());
2714 entry1->Close();
2715 entry2->Close();
2716 entry3->Close();
2717 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2718 FlushQueueForTest(); // This one actually allows that task to complete.
2720 EXPECT_EQ(0, cache_->GetEntryCount());
2723 TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2724 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2725 DisableFirstCleanup();
2726 InitCache();
2727 BackendDisable4();
2730 TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2731 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2732 DisableFirstCleanup();
2733 SetNewEviction();
2734 InitCache();
2735 BackendDisable4();
2738 // Tests the exposed API with a disabled cache.
2739 void DiskCacheBackendTest::BackendDisabledAPI() {
2740 cache_impl_->SetUnitTestMode(); // Simulate failure restarting the cache.
2742 disk_cache::Entry* entry1, *entry2;
2743 scoped_ptr<TestIterator> iter = CreateIterator();
2744 EXPECT_EQ(2, cache_->GetEntryCount());
2745 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2746 entry1->Close();
2747 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2748 FlushQueueForTest();
2749 // The cache should be disabled.
2751 EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
2752 EXPECT_EQ(0, cache_->GetEntryCount());
2753 EXPECT_NE(net::OK, OpenEntry("First", &entry2));
2754 EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2755 EXPECT_NE(net::OK, DoomEntry("First"));
2756 EXPECT_NE(net::OK, DoomAllEntries());
2757 EXPECT_NE(net::OK, DoomEntriesBetween(Time(), Time::Now()));
2758 EXPECT_NE(net::OK, DoomEntriesSince(Time()));
2759 iter = CreateIterator();
2760 EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2762 std::vector<std::pair<std::string, std::string>> stats;
2763 cache_->GetStats(&stats);
2764 EXPECT_TRUE(stats.empty());
2765 cache_->OnExternalCacheHit("First");
2768 TEST_F(DiskCacheBackendTest, DisabledAPI) {
2769 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2770 DisableFirstCleanup();
2771 InitCache();
2772 BackendDisabledAPI();
2775 TEST_F(DiskCacheBackendTest, NewEvictionDisabledAPI) {
2776 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2777 DisableFirstCleanup();
2778 SetNewEviction();
2779 InitCache();
2780 BackendDisabledAPI();
2783 TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2784 MessageLoopHelper helper;
2786 ASSERT_TRUE(CleanupCacheDir());
2787 scoped_ptr<disk_cache::BackendImpl> cache;
2788 cache.reset(new disk_cache::BackendImpl(
2789 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2790 ASSERT_TRUE(NULL != cache.get());
2791 cache->SetUnitTestMode();
2792 ASSERT_EQ(net::OK, cache->SyncInit());
2794 // Wait for a callback that never comes... about 2 secs :). The message loop
2795 // has to run to allow invocation of the usage timer.
2796 helper.WaitUntilCacheIoFinished(1);
2799 TEST_F(DiskCacheBackendTest, TimerNotCreated) {
2800 ASSERT_TRUE(CopyTestCache("wrong_version"));
2802 scoped_ptr<disk_cache::BackendImpl> cache;
2803 cache.reset(new disk_cache::BackendImpl(
2804 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2805 ASSERT_TRUE(NULL != cache.get());
2806 cache->SetUnitTestMode();
2807 ASSERT_NE(net::OK, cache->SyncInit());
2809 ASSERT_TRUE(NULL == cache->GetTimerForTest());
2811 DisableIntegrityCheck();
2814 TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2815 InitCache();
2816 disk_cache::Entry* entry;
2817 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2818 entry->Close();
2819 FlushQueueForTest();
2821 disk_cache::StatsItems stats;
2822 cache_->GetStats(&stats);
2823 EXPECT_FALSE(stats.empty());
2825 disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2826 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2828 cache_.reset();
2830 // Now open the cache and verify that the stats are still there.
2831 DisableFirstCleanup();
2832 InitCache();
2833 EXPECT_EQ(1, cache_->GetEntryCount());
2835 stats.clear();
2836 cache_->GetStats(&stats);
2837 EXPECT_FALSE(stats.empty());
2839 EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2842 void DiskCacheBackendTest::BackendDoomAll() {
2843 InitCache();
2845 disk_cache::Entry *entry1, *entry2;
2846 ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2847 ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2848 entry1->Close();
2849 entry2->Close();
2851 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2852 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2854 ASSERT_EQ(4, cache_->GetEntryCount());
2855 EXPECT_EQ(net::OK, DoomAllEntries());
2856 ASSERT_EQ(0, cache_->GetEntryCount());
2858 // We should stop posting tasks at some point (if we post any).
2859 base::MessageLoop::current()->RunUntilIdle();
2861 disk_cache::Entry *entry3, *entry4;
2862 EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2863 ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2864 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2866 EXPECT_EQ(net::OK, DoomAllEntries());
2867 ASSERT_EQ(0, cache_->GetEntryCount());
2869 entry1->Close();
2870 entry2->Close();
2871 entry3->Doom(); // The entry should be already doomed, but this must work.
2872 entry3->Close();
2873 entry4->Close();
2875 // Now try with all references released.
2876 ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2877 ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2878 entry1->Close();
2879 entry2->Close();
2881 ASSERT_EQ(2, cache_->GetEntryCount());
2882 EXPECT_EQ(net::OK, DoomAllEntries());
2883 ASSERT_EQ(0, cache_->GetEntryCount());
2885 EXPECT_EQ(net::OK, DoomAllEntries());
2888 TEST_F(DiskCacheBackendTest, DoomAll) {
2889 BackendDoomAll();
2892 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2893 SetNewEviction();
2894 BackendDoomAll();
2897 TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2898 SetMemoryOnlyMode();
2899 BackendDoomAll();
2902 TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2903 SetCacheType(net::APP_CACHE);
2904 BackendDoomAll();
2907 TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2908 SetCacheType(net::SHADER_CACHE);
2909 BackendDoomAll();
2912 // If the index size changes when we doom the cache, we should not crash.
2913 void DiskCacheBackendTest::BackendDoomAll2() {
2914 EXPECT_EQ(2, cache_->GetEntryCount());
2915 EXPECT_EQ(net::OK, DoomAllEntries());
2917 disk_cache::Entry* entry;
2918 ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2919 entry->Close();
2921 EXPECT_EQ(1, cache_->GetEntryCount());
2924 TEST_F(DiskCacheBackendTest, DoomAll2) {
2925 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2926 DisableFirstCleanup();
2927 SetMaxSize(20 * 1024 * 1024);
2928 InitCache();
2929 BackendDoomAll2();
2932 TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2933 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2934 DisableFirstCleanup();
2935 SetMaxSize(20 * 1024 * 1024);
2936 SetNewEviction();
2937 InitCache();
2938 BackendDoomAll2();
2941 // We should be able to create the same entry on multiple simultaneous instances
2942 // of the cache.
2943 TEST_F(DiskCacheTest, MultipleInstances) {
2944 base::ScopedTempDir store1, store2;
2945 ASSERT_TRUE(store1.CreateUniqueTempDir());
2946 ASSERT_TRUE(store2.CreateUniqueTempDir());
2948 base::Thread cache_thread("CacheThread");
2949 ASSERT_TRUE(cache_thread.StartWithOptions(
2950 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2951 net::TestCompletionCallback cb;
2953 const int kNumberOfCaches = 2;
2954 scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2956 int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
2957 net::CACHE_BACKEND_DEFAULT,
2958 store1.path(),
2960 false,
2961 cache_thread.task_runner(),
2962 NULL,
2963 &cache[0],
2964 cb.callback());
2965 ASSERT_EQ(net::OK, cb.GetResult(rv));
2966 rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2967 net::CACHE_BACKEND_DEFAULT,
2968 store2.path(),
2970 false,
2971 cache_thread.task_runner(),
2972 NULL,
2973 &cache[1],
2974 cb.callback());
2975 ASSERT_EQ(net::OK, cb.GetResult(rv));
2977 ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2979 std::string key("the first key");
2980 disk_cache::Entry* entry;
2981 for (int i = 0; i < kNumberOfCaches; i++) {
2982 rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2983 ASSERT_EQ(net::OK, cb.GetResult(rv));
2984 entry->Close();
2988 // Test the six regions of the curve that determines the max cache size.
2989 TEST_F(DiskCacheTest, AutomaticMaxSize) {
2990 using disk_cache::kDefaultCacheSize;
2991 int64 large_size = kDefaultCacheSize;
2993 // Region 1: expected = available * 0.8
2994 EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
2995 disk_cache::PreferredCacheSize(large_size - 1));
2996 EXPECT_EQ(kDefaultCacheSize * 8 / 10,
2997 disk_cache::PreferredCacheSize(large_size));
2998 EXPECT_EQ(kDefaultCacheSize - 1,
2999 disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
3001 // Region 2: expected = default_size
3002 EXPECT_EQ(kDefaultCacheSize,
3003 disk_cache::PreferredCacheSize(large_size * 10 / 8));
3004 EXPECT_EQ(kDefaultCacheSize,
3005 disk_cache::PreferredCacheSize(large_size * 10 - 1));
3007 // Region 3: expected = available * 0.1
3008 EXPECT_EQ(kDefaultCacheSize,
3009 disk_cache::PreferredCacheSize(large_size * 10));
3010 EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
3011 disk_cache::PreferredCacheSize(large_size * 25 - 1));
3013 // Region 4: expected = default_size * 2.5
3014 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3015 disk_cache::PreferredCacheSize(large_size * 25));
3016 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3017 disk_cache::PreferredCacheSize(large_size * 100 - 1));
3018 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3019 disk_cache::PreferredCacheSize(large_size * 100));
3020 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3021 disk_cache::PreferredCacheSize(large_size * 250 - 1));
3023 // Region 5: expected = available * 0.1
3024 int64 largest_size = kDefaultCacheSize * 4;
3025 EXPECT_EQ(kDefaultCacheSize * 25 / 10,
3026 disk_cache::PreferredCacheSize(large_size * 250));
3027 EXPECT_EQ(largest_size - 1,
3028 disk_cache::PreferredCacheSize(largest_size * 100 - 1));
3030 // Region 6: expected = largest possible size
3031 EXPECT_EQ(largest_size,
3032 disk_cache::PreferredCacheSize(largest_size * 100));
3033 EXPECT_EQ(largest_size,
3034 disk_cache::PreferredCacheSize(largest_size * 10000));
3037 // Tests that we can "migrate" a running instance from one experiment group to
3038 // another.
3039 TEST_F(DiskCacheBackendTest, Histograms) {
3040 InitCache();
3041 disk_cache::BackendImpl* backend_ = cache_impl_; // Needed be the macro.
3043 for (int i = 1; i < 3; i++) {
3044 CACHE_UMA(HOURS, "FillupTime", i, 28);
3048 // Make sure that we keep the total memory used by the internal buffers under
3049 // control.
3050 TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
3051 InitCache();
3052 std::string key("the first key");
3053 disk_cache::Entry* entry;
3054 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3056 const int kSize = 200;
3057 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3058 CacheTestFillBuffer(buffer->data(), kSize, true);
3060 for (int i = 0; i < 10; i++) {
3061 SCOPED_TRACE(i);
3062 // Allocate 2MB for this entry.
3063 EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
3064 EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
3065 EXPECT_EQ(kSize,
3066 WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
3067 EXPECT_EQ(kSize,
3068 WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
3070 // Delete one of the buffers and truncate the other.
3071 EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
3072 EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
3074 // Delete the second buffer, writing 10 bytes to disk.
3075 entry->Close();
3076 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3079 entry->Close();
3080 EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
3083 // This test assumes at least 150MB of system memory.
3084 TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
3085 InitCache();
3087 const int kOneMB = 1024 * 1024;
3088 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3089 EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3091 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3092 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3094 EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3095 EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
3097 cache_impl_->BufferDeleted(kOneMB);
3098 EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3100 // Check the upper limit.
3101 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3103 for (int i = 0; i < 30; i++)
3104 cache_impl_->IsAllocAllowed(0, kOneMB); // Ignore the result.
3106 EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
3109 // Tests that sharing of external files works and we are able to delete the
3110 // files when we need to.
3111 TEST_F(DiskCacheBackendTest, FileSharing) {
3112 InitCache();
3114 disk_cache::Addr address(0x80000001);
3115 ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3116 base::FilePath name = cache_impl_->GetFileName(address);
3118 scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
3119 file->Init(name);
3121 #if defined(OS_WIN)
3122 DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3123 DWORD access = GENERIC_READ | GENERIC_WRITE;
3124 base::win::ScopedHandle file2(CreateFile(
3125 name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3126 EXPECT_FALSE(file2.IsValid());
3128 sharing |= FILE_SHARE_DELETE;
3129 file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3130 OPEN_EXISTING, 0, NULL));
3131 EXPECT_TRUE(file2.IsValid());
3132 #endif
3134 EXPECT_TRUE(base::DeleteFile(name, false));
3136 // We should be able to use the file.
3137 const int kSize = 200;
3138 char buffer1[kSize];
3139 char buffer2[kSize];
3140 memset(buffer1, 't', kSize);
3141 memset(buffer2, 0, kSize);
3142 EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3143 EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3144 EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
3146 EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
3149 TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3150 InitCache();
3152 disk_cache::Entry* entry;
3154 for (int i = 0; i < 2; ++i) {
3155 std::string key = base::StringPrintf("key%d", i);
3156 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3157 entry->Close();
3160 // Ping the oldest entry.
3161 cache_->OnExternalCacheHit("key0");
3163 TrimForTest(false);
3165 // Make sure the older key remains.
3166 EXPECT_EQ(1, cache_->GetEntryCount());
3167 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3168 entry->Close();
3171 TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3172 SetCacheType(net::SHADER_CACHE);
3173 InitCache();
3175 disk_cache::Entry* entry;
3177 for (int i = 0; i < 2; ++i) {
3178 std::string key = base::StringPrintf("key%d", i);
3179 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3180 entry->Close();
3183 // Ping the oldest entry.
3184 cache_->OnExternalCacheHit("key0");
3186 TrimForTest(false);
3188 // Make sure the older key remains.
3189 EXPECT_EQ(1, cache_->GetEntryCount());
3190 ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3191 entry->Close();
3194 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3195 SetCacheType(net::APP_CACHE);
3196 SetSimpleCacheMode();
3197 BackendShutdownWithPendingCreate(false);
3200 TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3201 SetCacheType(net::APP_CACHE);
3202 SetSimpleCacheMode();
3203 BackendShutdownWithPendingFileIO(false);
3206 TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3207 SetSimpleCacheMode();
3208 BackendBasics();
3211 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3212 SetCacheType(net::APP_CACHE);
3213 SetSimpleCacheMode();
3214 BackendBasics();
3217 TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3218 SetSimpleCacheMode();
3219 BackendKeying();
3222 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3223 SetSimpleCacheMode();
3224 SetCacheType(net::APP_CACHE);
3225 BackendKeying();
3228 TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3229 SetSimpleCacheMode();
3230 BackendSetSize();
// MacOS has a default open file limit of 256 files, which is incompatible with
// this simple cache test.
// Wrapping a test name in SIMPLE_MAYBE_MACOS() disables it on MacOS only, by
// prefixing the name with gtest's DISABLED_ marker.
#if defined(OS_MACOSX)
#define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
#else
#define SIMPLE_MAYBE_MACOS(TestName) TestName
#endif
3241 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3242 SetMaxSize(0x100000);
3243 SetSimpleCacheMode();
3244 BackendLoad();
3247 TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3248 SetCacheType(net::APP_CACHE);
3249 SetSimpleCacheMode();
3250 SetMaxSize(0x100000);
3251 BackendLoad();
3254 TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3255 SetSimpleCacheMode();
3256 BackendDoomRecent();
3259 // crbug.com/330926, crbug.com/370677
3260 TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
3261 SetSimpleCacheMode();
3262 BackendDoomBetween();
3265 TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3266 SetSimpleCacheMode();
3267 BackendDoomAll();
3270 TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3271 SetCacheType(net::APP_CACHE);
3272 SetSimpleCacheMode();
3273 BackendDoomAll();
3276 TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3277 SetSimpleCacheMode();
3278 InitCache();
3280 const char key[] = "the first key";
3281 disk_cache::Entry* entry = NULL;
3283 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3284 ASSERT_TRUE(entry != NULL);
3285 entry->Close();
3286 entry = NULL;
3288 // To make sure the file creation completed we need to call open again so that
3289 // we block until it actually created the files.
3290 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3291 ASSERT_TRUE(entry != NULL);
3292 entry->Close();
3293 entry = NULL;
3295 // Delete one of the files in the entry.
3296 base::FilePath to_delete_file = cache_path_.AppendASCII(
3297 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3298 EXPECT_TRUE(base::PathExists(to_delete_file));
3299 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3301 // Failing to open the entry should delete the rest of these files.
3302 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3304 // Confirm the rest of the files are gone.
3305 for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3306 base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3307 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3308 EXPECT_FALSE(base::PathExists(should_be_gone_file));
3312 TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3313 SetSimpleCacheMode();
3314 InitCache();
3316 const char key[] = "the first key";
3317 disk_cache::Entry* entry = NULL;
3319 ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3320 disk_cache::Entry* null = NULL;
3321 ASSERT_NE(null, entry);
3322 entry->Close();
3323 entry = NULL;
3325 // To make sure the file creation completed we need to call open again so that
3326 // we block until it actually created the files.
3327 ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3328 ASSERT_NE(null, entry);
3329 entry->Close();
3330 entry = NULL;
3332 // The entry is being closed on the Simple Cache worker pool
3333 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
3334 base::RunLoop().RunUntilIdle();
3336 // Write an invalid header for stream 0 and stream 1.
3337 base::FilePath entry_file1_path = cache_path_.AppendASCII(
3338 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3340 disk_cache::SimpleFileHeader header;
3341 header.initial_magic_number = GG_UINT64_C(0xbadf00d);
3342 EXPECT_EQ(
3343 implicit_cast<int>(sizeof(header)),
3344 base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3345 sizeof(header)));
3346 ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3349 // Tests that the Simple Cache Backend fails to initialize with non-matching
3350 // file structure on disk.
3351 TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3352 // Create a cache structure with the |BackendImpl|.
3353 InitCache();
3354 disk_cache::Entry* entry;
3355 const int kSize = 50;
3356 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3357 CacheTestFillBuffer(buffer->data(), kSize, false);
3358 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3359 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3360 entry->Close();
3361 cache_.reset();
3363 // Check that the |SimpleBackendImpl| does not favor this structure.
3364 base::Thread cache_thread("CacheThread");
3365 ASSERT_TRUE(cache_thread.StartWithOptions(
3366 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3367 disk_cache::SimpleBackendImpl* simple_cache =
3368 new disk_cache::SimpleBackendImpl(
3369 cache_path_, 0, net::DISK_CACHE, cache_thread.task_runner(), NULL);
3370 net::TestCompletionCallback cb;
3371 int rv = simple_cache->Init(cb.callback());
3372 EXPECT_NE(net::OK, cb.GetResult(rv));
3373 delete simple_cache;
3374 DisableIntegrityCheck();
3377 // Tests that the |BackendImpl| refuses to initialize on top of the files
3378 // generated by the Simple Cache Backend.
3379 TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3380 // Create a cache structure with the |SimpleBackendImpl|.
3381 SetSimpleCacheMode();
3382 InitCache();
3383 disk_cache::Entry* entry;
3384 const int kSize = 50;
3385 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3386 CacheTestFillBuffer(buffer->data(), kSize, false);
3387 ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3388 ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3389 entry->Close();
3390 cache_.reset();
3392 // Check that the |BackendImpl| does not favor this structure.
3393 base::Thread cache_thread("CacheThread");
3394 ASSERT_TRUE(cache_thread.StartWithOptions(
3395 base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3396 disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3397 cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL);
3398 cache->SetUnitTestMode();
3399 net::TestCompletionCallback cb;
3400 int rv = cache->Init(cb.callback());
3401 EXPECT_NE(net::OK, cb.GetResult(rv));
3402 delete cache;
3403 DisableIntegrityCheck();
3406 TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3407 SetSimpleCacheMode();
3408 BackendFixEnumerators();
3411 // Tests basic functionality of the SimpleBackend implementation of the
3412 // enumeration API.
3413 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3414 SetSimpleCacheMode();
3415 InitCache();
3416 std::set<std::string> key_pool;
3417 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3419 // Check that enumeration returns all entries.
3420 std::set<std::string> keys_to_match(key_pool);
3421 scoped_ptr<TestIterator> iter = CreateIterator();
3422 size_t count = 0;
3423 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3424 iter.reset();
3425 EXPECT_EQ(key_pool.size(), count);
3426 EXPECT_TRUE(keys_to_match.empty());
3428 // Check that opening entries does not affect enumeration.
3429 keys_to_match = key_pool;
3430 iter = CreateIterator();
3431 count = 0;
3432 disk_cache::Entry* entry_opened_before;
3433 ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3434 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3435 iter.get(),
3436 &keys_to_match,
3437 &count));
3439 disk_cache::Entry* entry_opened_middle;
3440 ASSERT_EQ(net::OK,
3441 OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3442 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3443 iter.reset();
3444 entry_opened_before->Close();
3445 entry_opened_middle->Close();
3447 EXPECT_EQ(key_pool.size(), count);
3448 EXPECT_TRUE(keys_to_match.empty());
3451 // Tests that the enumerations are not affected by dooming an entry in the
3452 // middle.
3453 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3454 SetSimpleCacheMode();
3455 InitCache();
3456 std::set<std::string> key_pool;
3457 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3459 // Check that enumeration returns all entries but the doomed one.
3460 std::set<std::string> keys_to_match(key_pool);
3461 scoped_ptr<TestIterator> iter = CreateIterator();
3462 size_t count = 0;
3463 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3464 iter.get(),
3465 &keys_to_match,
3466 &count));
3468 std::string key_to_delete = *(keys_to_match.begin());
3469 DoomEntry(key_to_delete);
3470 keys_to_match.erase(key_to_delete);
3471 key_pool.erase(key_to_delete);
3472 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3473 iter.reset();
3475 EXPECT_EQ(key_pool.size(), count);
3476 EXPECT_TRUE(keys_to_match.empty());
3479 // Tests that enumerations are not affected by corrupt files.
3480 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3481 SetSimpleCacheMode();
3482 InitCache();
3483 std::set<std::string> key_pool;
3484 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3486 // Create a corrupt entry. The write/read sequence ensures that the entry will
3487 // have been created before corrupting the platform files, in the case of
3488 // optimistic operations.
3489 const std::string key = "the key";
3490 disk_cache::Entry* corrupted_entry;
3492 ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3493 ASSERT_TRUE(corrupted_entry);
3494 const int kSize = 50;
3495 scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3496 CacheTestFillBuffer(buffer->data(), kSize, false);
3497 ASSERT_EQ(kSize,
3498 WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3499 ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3500 corrupted_entry->Close();
3502 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3503 key, cache_path_));
3504 EXPECT_EQ(key_pool.size() + 1,
3505 implicit_cast<size_t>(cache_->GetEntryCount()));
3507 // Check that enumeration returns all entries but the corrupt one.
3508 std::set<std::string> keys_to_match(key_pool);
3509 scoped_ptr<TestIterator> iter = CreateIterator();
3510 size_t count = 0;
3511 ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3512 iter.reset();
3514 EXPECT_EQ(key_pool.size(), count);
3515 EXPECT_TRUE(keys_to_match.empty());
3518 // Tests that enumerations don't leak memory when the backend is destructed
3519 // mid-enumeration.
3520 TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
3521 SetSimpleCacheMode();
3522 InitCache();
3523 std::set<std::string> key_pool;
3524 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3526 scoped_ptr<TestIterator> iter = CreateIterator();
3527 disk_cache::Entry* entry = NULL;
3528 ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
3529 EXPECT_TRUE(entry);
3530 disk_cache::ScopedEntryPtr entry_closer(entry);
3532 cache_.reset();
3533 // This test passes if we don't leak memory.
3536 // Tests that a SimpleCache doesn't crash when files are deleted very quickly
3537 // after closing.
3538 // NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
3539 TEST_F(DiskCacheBackendTest, SimpleCacheDeleteQuickly) {
3540 SetSimpleCacheMode();
3541 for (int i = 0; i < 100; ++i) {
3542 InitCache();
3543 cache_.reset();
3544 EXPECT_TRUE(CleanupCacheDir());