1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/files/file_util.h"
7 #include "base/metrics/field_trial.h"
9 #include "base/run_loop.h"
10 #include "base/strings/string_util.h"
11 #include "base/strings/stringprintf.h"
12 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
13 #include "base/thread_task_runner_handle.h"
14 #include "base/threading/platform_thread.h"
15 #include "base/threading/thread_restrictions.h"
16 #include "net/base/cache_type.h"
17 #include "net/base/io_buffer.h"
18 #include "net/base/net_errors.h"
19 #include "net/base/test_completion_callback.h"
20 #include "net/disk_cache/blockfile/backend_impl.h"
21 #include "net/disk_cache/blockfile/entry_impl.h"
22 #include "net/disk_cache/blockfile/experiments.h"
23 #include "net/disk_cache/blockfile/histogram_macros.h"
24 #include "net/disk_cache/blockfile/mapped_file.h"
25 #include "net/disk_cache/cache_util.h"
26 #include "net/disk_cache/disk_cache_test_base.h"
27 #include "net/disk_cache/disk_cache_test_util.h"
28 #include "net/disk_cache/memory/mem_backend_impl.h"
29 #include "net/disk_cache/simple/simple_backend_impl.h"
30 #include "net/disk_cache/simple/simple_entry_format.h"
31 #include "net/disk_cache/simple/simple_test_util.h"
32 #include "net/disk_cache/simple/simple_util.h"
33 #include "testing/gtest/include/gtest/gtest.h"
36 #include "base/win/scoped_handle.h"
39 // Provide a BackendImpl object to macros from histogram_macros.h.
40 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_
// Key of the single entry pre-created by CreateExistingEntryCache() below.
const char kExistingEntryKey[] = "existing entry key";
48 scoped_ptr
<disk_cache::BackendImpl
> CreateExistingEntryCache(
49 const base::Thread
& cache_thread
,
50 base::FilePath
& cache_path
) {
51 net::TestCompletionCallback cb
;
53 scoped_ptr
<disk_cache::BackendImpl
> cache(new disk_cache::BackendImpl(
54 cache_path
, cache_thread
.message_loop_proxy(), NULL
));
55 int rv
= cache
->Init(cb
.callback());
56 if (cb
.GetResult(rv
) != net::OK
)
57 return scoped_ptr
<disk_cache::BackendImpl
>();
59 disk_cache::Entry
* entry
= NULL
;
60 rv
= cache
->CreateEntry(kExistingEntryKey
, &entry
, cb
.callback());
61 if (cb
.GetResult(rv
) != net::OK
)
62 return scoped_ptr
<disk_cache::BackendImpl
>();
// Tests that can run with different types of caches.
// NOTE(review): this declaration is truncated in the extract — access
// specifiers, constructor/SetUp helpers, the trailing parameters of
// EnumerateAndMatchKeys() and the closing brace are not visible here.
class DiskCacheBackendTest : public DiskCacheTestWithCache {
  // Some utility methods:

  // Perform IO operations on the cache until there is pending IO.
  int GeneratePendingIO(net::TestCompletionCallback* cb);

  // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
  // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
  // There are 4 entries after doomed_start and 2 after doomed_end.
  void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);

  // Creates random entries and records their keys in |key_pool|.
  bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
  // Iterates the backend, erasing each opened key from |keys_to_match|.
  // NOTE(review): remaining parameters of this declaration are missing from
  // the extract.
  bool EnumerateAndMatchKeys(int max_to_open,
                             std::set<std::string>* keys_to_match,

  // Shutdown-behavior helpers, parameterized on whether the teardown is fast.
  void BackendShutdownWithPendingFileIO(bool fast);
  void BackendShutdownWithPendingIO(bool fast);
  void BackendShutdownWithPendingCreate(bool fast);
  void BackendSetSize();

  // Bodies for the per-cache-type test variants below.
  void BackendValidEntry();
  void BackendInvalidEntry();
  void BackendInvalidEntryRead();
  void BackendInvalidEntryWithLoad();
  void BackendTrimInvalidEntry();
  void BackendTrimInvalidEntry2();
  void BackendEnumerations();
  void BackendEnumerations2();
  void BackendInvalidEntryEnumeration();
  void BackendFixEnumerators();
  void BackendDoomRecent();
  void BackendDoomBetween();
  void BackendTransaction(const std::string& name, int num_entries, bool load);
  void BackendRecoverInsert();
  void BackendRecoverRemove();
  void BackendRecoverWithEviction();
  void BackendInvalidEntry2();
  void BackendInvalidEntry3();
  void BackendInvalidEntry7();
  void BackendInvalidEntry8();
  void BackendInvalidEntry9(bool eviction);
  void BackendInvalidEntry10(bool eviction);
  void BackendInvalidEntry11(bool eviction);
  void BackendTrimInvalidEntry12();
  void BackendDoomAll();
  void BackendDoomAll2();
  void BackendInvalidRankings();
  void BackendInvalidRankings2();
  void BackendDisable();
  void BackendDisable2();
  void BackendDisable3();
  void BackendDisable4();
132 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback
* cb
) {
133 if (!use_current_thread_
) {
135 return net::ERR_FAILED
;
138 disk_cache::Entry
* entry
;
139 int rv
= cache_
->CreateEntry("some key", &entry
, cb
->callback());
140 if (cb
->GetResult(rv
) != net::OK
)
141 return net::ERR_CACHE_CREATE_FAILURE
;
143 const int kSize
= 25000;
144 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
145 CacheTestFillBuffer(buffer
->data(), kSize
, false);
147 for (int i
= 0; i
< 10 * 1024 * 1024; i
+= 64 * 1024) {
148 // We are using the current thread as the cache thread because we want to
149 // be able to call directly this method to make sure that the OS (instead
150 // of us switching thread) is returning IO pending.
151 if (!simple_cache_mode_
) {
152 rv
= static_cast<disk_cache::EntryImpl
*>(entry
)->WriteDataImpl(
153 0, i
, buffer
.get(), kSize
, cb
->callback(), false);
155 rv
= entry
->WriteData(0, i
, buffer
.get(), kSize
, cb
->callback(), false);
158 if (rv
== net::ERR_IO_PENDING
)
161 rv
= net::ERR_FAILED
;
164 // Don't call Close() to avoid going through the queue or we'll deadlock
165 // waiting for the operation to finish.
166 if (!simple_cache_mode_
)
167 static_cast<disk_cache::EntryImpl
*>(entry
)->Release();
174 void DiskCacheBackendTest::InitSparseCache(base::Time
* doomed_start
,
175 base::Time
* doomed_end
) {
178 const int kSize
= 50;
179 // This must be greater then MemEntryImpl::kMaxSparseEntrySize.
180 const int kOffset
= 10 + 1024 * 1024;
182 disk_cache::Entry
* entry0
= NULL
;
183 disk_cache::Entry
* entry1
= NULL
;
184 disk_cache::Entry
* entry2
= NULL
;
186 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
187 CacheTestFillBuffer(buffer
->data(), kSize
, false);
189 ASSERT_EQ(net::OK
, CreateEntry("zeroth", &entry0
));
190 ASSERT_EQ(kSize
, WriteSparseData(entry0
, 0, buffer
.get(), kSize
));
192 WriteSparseData(entry0
, kOffset
+ kSize
, buffer
.get(), kSize
));
198 *doomed_start
= base::Time::Now();
200 // Order in rankings list:
201 // first_part1, first_part2, second_part1, second_part2
202 ASSERT_EQ(net::OK
, CreateEntry("first", &entry1
));
203 ASSERT_EQ(kSize
, WriteSparseData(entry1
, 0, buffer
.get(), kSize
));
205 WriteSparseData(entry1
, kOffset
+ kSize
, buffer
.get(), kSize
));
208 ASSERT_EQ(net::OK
, CreateEntry("second", &entry2
));
209 ASSERT_EQ(kSize
, WriteSparseData(entry2
, 0, buffer
.get(), kSize
));
211 WriteSparseData(entry2
, kOffset
+ kSize
, buffer
.get(), kSize
));
217 *doomed_end
= base::Time::Now();
219 // Order in rankings list:
220 // third_part1, fourth_part1, third_part2, fourth_part2
221 disk_cache::Entry
* entry3
= NULL
;
222 disk_cache::Entry
* entry4
= NULL
;
223 ASSERT_EQ(net::OK
, CreateEntry("third", &entry3
));
224 ASSERT_EQ(kSize
, WriteSparseData(entry3
, 0, buffer
.get(), kSize
));
225 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry4
));
226 ASSERT_EQ(kSize
, WriteSparseData(entry4
, 0, buffer
.get(), kSize
));
228 WriteSparseData(entry3
, kOffset
+ kSize
, buffer
.get(), kSize
));
230 WriteSparseData(entry4
, kOffset
+ kSize
, buffer
.get(), kSize
));
238 // Creates entries based on random keys. Stores these keys in |key_pool|.
239 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
240 std::set
<std::string
>* key_pool
) {
241 const int kNumEntries
= 10;
243 for (int i
= 0; i
< kNumEntries
; ++i
) {
244 std::string key
= GenerateKey(true);
245 disk_cache::Entry
* entry
;
246 if (CreateEntry(key
, &entry
) != net::OK
)
248 key_pool
->insert(key
);
251 return key_pool
->size() == implicit_cast
<size_t>(cache_
->GetEntryCount());
254 // Performs iteration over the backend and checks that the keys of entries
255 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
256 // will be opened, if it is positive. Otherwise, iteration will continue until
257 // OpenNextEntry stops returning net::OK.
258 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
261 std::set
<std::string
>* keys_to_match
,
263 disk_cache::Entry
* entry
;
267 while (iter
->OpenNextEntry(&entry
) == net::OK
) {
270 EXPECT_EQ(1U, keys_to_match
->erase(entry
->GetKey()));
273 if (max_to_open
>= 0 && implicit_cast
<int>(*count
) >= max_to_open
)
280 void DiskCacheBackendTest::BackendBasics() {
282 disk_cache::Entry
*entry1
= NULL
, *entry2
= NULL
;
283 EXPECT_NE(net::OK
, OpenEntry("the first key", &entry1
));
284 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry1
));
285 ASSERT_TRUE(NULL
!= entry1
);
289 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry1
));
290 ASSERT_TRUE(NULL
!= entry1
);
294 EXPECT_NE(net::OK
, CreateEntry("the first key", &entry1
));
295 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry1
));
296 EXPECT_NE(net::OK
, OpenEntry("some other key", &entry2
));
297 ASSERT_EQ(net::OK
, CreateEntry("some other key", &entry2
));
298 ASSERT_TRUE(NULL
!= entry1
);
299 ASSERT_TRUE(NULL
!= entry2
);
300 EXPECT_EQ(2, cache_
->GetEntryCount());
302 disk_cache::Entry
* entry3
= NULL
;
303 ASSERT_EQ(net::OK
, OpenEntry("some other key", &entry3
));
304 ASSERT_TRUE(NULL
!= entry3
);
305 EXPECT_TRUE(entry2
== entry3
);
306 EXPECT_EQ(2, cache_
->GetEntryCount());
308 EXPECT_EQ(net::OK
, DoomEntry("some other key"));
309 EXPECT_EQ(1, cache_
->GetEntryCount());
314 EXPECT_EQ(net::OK
, DoomEntry("the first key"));
315 EXPECT_EQ(0, cache_
->GetEntryCount());
317 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry1
));
318 ASSERT_EQ(net::OK
, CreateEntry("some other key", &entry2
));
321 EXPECT_EQ(net::OK
, DoomEntry("some other key"));
322 EXPECT_EQ(0, cache_
->GetEntryCount());
326 TEST_F(DiskCacheBackendTest
, Basics
) {
330 TEST_F(DiskCacheBackendTest
, NewEvictionBasics
) {
335 TEST_F(DiskCacheBackendTest
, MemoryOnlyBasics
) {
340 TEST_F(DiskCacheBackendTest
, AppCacheBasics
) {
341 SetCacheType(net::APP_CACHE
);
345 TEST_F(DiskCacheBackendTest
, ShaderCacheBasics
) {
346 SetCacheType(net::SHADER_CACHE
);
350 void DiskCacheBackendTest::BackendKeying() {
352 const char kName1
[] = "the first key";
353 const char kName2
[] = "the first Key";
354 disk_cache::Entry
*entry1
, *entry2
;
355 ASSERT_EQ(net::OK
, CreateEntry(kName1
, &entry1
));
357 ASSERT_EQ(net::OK
, CreateEntry(kName2
, &entry2
));
358 EXPECT_TRUE(entry1
!= entry2
) << "Case sensitive";
362 base::strlcpy(buffer
, kName1
, arraysize(buffer
));
363 ASSERT_EQ(net::OK
, OpenEntry(buffer
, &entry2
));
364 EXPECT_TRUE(entry1
== entry2
);
367 base::strlcpy(buffer
+ 1, kName1
, arraysize(buffer
) - 1);
368 ASSERT_EQ(net::OK
, OpenEntry(buffer
+ 1, &entry2
));
369 EXPECT_TRUE(entry1
== entry2
);
372 base::strlcpy(buffer
+ 3, kName1
, arraysize(buffer
) - 3);
373 ASSERT_EQ(net::OK
, OpenEntry(buffer
+ 3, &entry2
));
374 EXPECT_TRUE(entry1
== entry2
);
377 // Now verify long keys.
379 memset(buffer2
, 's', sizeof(buffer2
));
380 buffer2
[1023] = '\0';
381 ASSERT_EQ(net::OK
, CreateEntry(buffer2
, &entry2
)) << "key on block file";
385 buffer2
[19999] = '\0';
386 ASSERT_EQ(net::OK
, CreateEntry(buffer2
, &entry2
)) << "key on external file";
391 TEST_F(DiskCacheBackendTest
, Keying
) {
395 TEST_F(DiskCacheBackendTest
, NewEvictionKeying
) {
400 TEST_F(DiskCacheBackendTest
, MemoryOnlyKeying
) {
405 TEST_F(DiskCacheBackendTest
, AppCacheKeying
) {
406 SetCacheType(net::APP_CACHE
);
410 TEST_F(DiskCacheBackendTest
, ShaderCacheKeying
) {
411 SetCacheType(net::SHADER_CACHE
);
415 TEST_F(DiskCacheTest
, CreateBackend
) {
416 net::TestCompletionCallback cb
;
419 ASSERT_TRUE(CleanupCacheDir());
420 base::Thread
cache_thread("CacheThread");
421 ASSERT_TRUE(cache_thread
.StartWithOptions(
422 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
424 // Test the private factory method(s).
425 scoped_ptr
<disk_cache::Backend
> cache
;
426 cache
= disk_cache::MemBackendImpl::CreateBackend(0, NULL
);
427 ASSERT_TRUE(cache
.get());
430 // Now test the public API.
431 int rv
= disk_cache::CreateCacheBackend(net::DISK_CACHE
,
432 net::CACHE_BACKEND_DEFAULT
,
436 cache_thread
.task_runner(),
440 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
441 ASSERT_TRUE(cache
.get());
444 rv
= disk_cache::CreateCacheBackend(net::MEMORY_CACHE
,
445 net::CACHE_BACKEND_DEFAULT
,
447 false, NULL
, NULL
, &cache
,
449 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
450 ASSERT_TRUE(cache
.get());
454 base::MessageLoop::current()->RunUntilIdle();
457 // Tests that |BackendImpl| fails to initialize with a missing file.
458 TEST_F(DiskCacheBackendTest
, CreateBackend_MissingFile
) {
459 ASSERT_TRUE(CopyTestCache("bad_entry"));
460 base::FilePath filename
= cache_path_
.AppendASCII("data_1");
461 base::DeleteFile(filename
, false);
462 base::Thread
cache_thread("CacheThread");
463 ASSERT_TRUE(cache_thread
.StartWithOptions(
464 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
465 net::TestCompletionCallback cb
;
467 bool prev
= base::ThreadRestrictions::SetIOAllowed(false);
468 scoped_ptr
<disk_cache::BackendImpl
> cache(new disk_cache::BackendImpl(
469 cache_path_
, cache_thread
.task_runner(), NULL
));
470 int rv
= cache
->Init(cb
.callback());
471 EXPECT_EQ(net::ERR_FAILED
, cb
.GetResult(rv
));
472 base::ThreadRestrictions::SetIOAllowed(prev
);
475 DisableIntegrityCheck();
478 TEST_F(DiskCacheBackendTest
, ExternalFiles
) {
480 // First, let's create a file on the folder.
481 base::FilePath filename
= cache_path_
.AppendASCII("f_000001");
483 const int kSize
= 50;
484 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
485 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
486 ASSERT_EQ(kSize
, base::WriteFile(filename
, buffer1
->data(), kSize
));
488 // Now let's create a file with the cache.
489 disk_cache::Entry
* entry
;
490 ASSERT_EQ(net::OK
, CreateEntry("key", &entry
));
491 ASSERT_EQ(0, WriteData(entry
, 0, 20000, buffer1
.get(), 0, false));
494 // And verify that the first file is still there.
495 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
496 ASSERT_EQ(kSize
, base::ReadFile(filename
, buffer2
->data(), kSize
));
497 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer2
->data(), kSize
));
500 // Tests that we deal with file-level pending operations at destruction time.
501 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast
) {
502 ASSERT_TRUE(CleanupCacheDir());
503 uint32 flags
= disk_cache::kNoBuffering
;
505 flags
|= disk_cache::kNoRandom
;
508 CreateBackend(flags
, NULL
);
510 net::TestCompletionCallback cb
;
511 int rv
= GeneratePendingIO(&cb
);
513 // The cache destructor will see one pending operation here.
516 if (rv
== net::ERR_IO_PENDING
) {
517 if (fast
|| simple_cache_mode_
)
518 EXPECT_FALSE(cb
.have_result());
520 EXPECT_TRUE(cb
.have_result());
523 base::MessageLoop::current()->RunUntilIdle();
526 // Wait for the actual operation to complete, or we'll keep a file handle that
527 // may cause issues later. Note that on iOS systems even though this test
528 // uses a single thread, the actual IO is posted to a worker thread and the
529 // cache destructor breaks the link to reach cb when the operation completes.
530 rv
= cb
.GetResult(rv
);
534 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingFileIO
) {
535 BackendShutdownWithPendingFileIO(false);
538 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
539 // builds because they contain a lot of intentional memory leaks.
540 // The wrapper scripts used to run tests under Valgrind Memcheck will also
541 // disable these tests. See:
542 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
543 #if !defined(LEAK_SANITIZER)
544 // We'll be leaking from this test.
545 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingFileIO_Fast
) {
546 // The integrity test sets kNoRandom so there's a version mismatch if we don't
547 // force new eviction.
549 BackendShutdownWithPendingFileIO(true);
553 // See crbug.com/330074
555 // Tests that one cache instance is not affected by another one going away.
556 TEST_F(DiskCacheBackendTest
, MultipleInstancesWithPendingFileIO
) {
557 base::ScopedTempDir store
;
558 ASSERT_TRUE(store
.CreateUniqueTempDir());
560 net::TestCompletionCallback cb
;
561 scoped_ptr
<disk_cache::Backend
> extra_cache
;
562 int rv
= disk_cache::CreateCacheBackend(net::DISK_CACHE
,
563 net::CACHE_BACKEND_DEFAULT
,
567 base::ThreadTaskRunnerHandle::Get(),
571 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
572 ASSERT_TRUE(extra_cache
.get() != NULL
);
574 ASSERT_TRUE(CleanupCacheDir());
575 SetNewEviction(); // Match the expected behavior for integrity verification.
578 CreateBackend(disk_cache::kNoBuffering
, NULL
);
579 rv
= GeneratePendingIO(&cb
);
581 // cache_ has a pending operation, and extra_cache will go away.
584 if (rv
== net::ERR_IO_PENDING
)
585 EXPECT_FALSE(cb
.have_result());
587 base::MessageLoop::current()->RunUntilIdle();
589 // Wait for the actual operation to complete, or we'll keep a file handle that
590 // may cause issues later.
591 rv
= cb
.GetResult(rv
);
595 // Tests that we deal with background-thread pending operations.
596 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast
) {
597 net::TestCompletionCallback cb
;
600 ASSERT_TRUE(CleanupCacheDir());
601 base::Thread
cache_thread("CacheThread");
602 ASSERT_TRUE(cache_thread
.StartWithOptions(
603 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
605 uint32 flags
= disk_cache::kNoBuffering
;
607 flags
|= disk_cache::kNoRandom
;
609 CreateBackend(flags
, &cache_thread
);
611 disk_cache::Entry
* entry
;
612 int rv
= cache_
->CreateEntry("some key", &entry
, cb
.callback());
613 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
617 // The cache destructor will see one pending operation here.
621 base::MessageLoop::current()->RunUntilIdle();
624 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingIO
) {
625 BackendShutdownWithPendingIO(false);
628 #if !defined(LEAK_SANITIZER)
629 // We'll be leaking from this test.
630 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingIO_Fast
) {
631 // The integrity test sets kNoRandom so there's a version mismatch if we don't
632 // force new eviction.
634 BackendShutdownWithPendingIO(true);
638 // Tests that we deal with create-type pending operations.
639 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast
) {
640 net::TestCompletionCallback cb
;
643 ASSERT_TRUE(CleanupCacheDir());
644 base::Thread
cache_thread("CacheThread");
645 ASSERT_TRUE(cache_thread
.StartWithOptions(
646 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
648 disk_cache::BackendFlags flags
=
649 fast
? disk_cache::kNone
: disk_cache::kNoRandom
;
650 CreateBackend(flags
, &cache_thread
);
652 disk_cache::Entry
* entry
;
653 int rv
= cache_
->CreateEntry("some key", &entry
, cb
.callback());
654 ASSERT_EQ(net::ERR_IO_PENDING
, rv
);
657 EXPECT_FALSE(cb
.have_result());
660 base::MessageLoop::current()->RunUntilIdle();
663 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingCreate
) {
664 BackendShutdownWithPendingCreate(false);
667 #if !defined(LEAK_SANITIZER)
668 // We'll be leaking an entry from this test.
669 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingCreate_Fast
) {
670 // The integrity test sets kNoRandom so there's a version mismatch if we don't
671 // force new eviction.
673 BackendShutdownWithPendingCreate(true);
677 // Disabled on android since this test requires cache creator to create
679 #if !defined(OS_ANDROID)
680 TEST_F(DiskCacheTest
, TruncatedIndex
) {
681 ASSERT_TRUE(CleanupCacheDir());
682 base::FilePath index
= cache_path_
.AppendASCII("index");
683 ASSERT_EQ(5, base::WriteFile(index
, "hello", 5));
685 base::Thread
cache_thread("CacheThread");
686 ASSERT_TRUE(cache_thread
.StartWithOptions(
687 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
688 net::TestCompletionCallback cb
;
690 scoped_ptr
<disk_cache::Backend
> backend
;
691 int rv
= disk_cache::CreateCacheBackend(net::DISK_CACHE
,
692 net::CACHE_BACKEND_BLOCKFILE
,
696 cache_thread
.task_runner(),
700 ASSERT_NE(net::OK
, cb
.GetResult(rv
));
702 ASSERT_FALSE(backend
);
706 void DiskCacheBackendTest::BackendSetSize() {
707 const int cache_size
= 0x10000; // 64 kB
708 SetMaxSize(cache_size
);
711 std::string
first("some key");
712 std::string
second("something else");
713 disk_cache::Entry
* entry
;
714 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
716 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(cache_size
));
717 memset(buffer
->data(), 0, cache_size
);
718 EXPECT_EQ(cache_size
/ 10,
719 WriteData(entry
, 0, 0, buffer
.get(), cache_size
/ 10, false))
722 EXPECT_EQ(net::ERR_FAILED
,
723 WriteData(entry
, 1, 0, buffer
.get(), cache_size
/ 5, false))
724 << "file size above the limit";
726 // By doubling the total size, we make this file cacheable.
727 SetMaxSize(cache_size
* 2);
728 EXPECT_EQ(cache_size
/ 5,
729 WriteData(entry
, 1, 0, buffer
.get(), cache_size
/ 5, false));
731 // Let's fill up the cache!.
732 SetMaxSize(cache_size
* 10);
733 EXPECT_EQ(cache_size
* 3 / 4,
734 WriteData(entry
, 0, 0, buffer
.get(), cache_size
* 3 / 4, false));
738 SetMaxSize(cache_size
);
740 // The cache is 95% full.
742 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
743 EXPECT_EQ(cache_size
/ 10,
744 WriteData(entry
, 0, 0, buffer
.get(), cache_size
/ 10, false));
746 disk_cache::Entry
* entry2
;
747 ASSERT_EQ(net::OK
, CreateEntry("an extra key", &entry2
));
748 EXPECT_EQ(cache_size
/ 10,
749 WriteData(entry2
, 0, 0, buffer
.get(), cache_size
/ 10, false));
750 entry2
->Close(); // This will trigger the cache trim.
752 EXPECT_NE(net::OK
, OpenEntry(first
, &entry2
));
754 FlushQueueForTest(); // Make sure that we are done trimming the cache.
755 FlushQueueForTest(); // We may have posted two tasks to evict stuff.
758 ASSERT_EQ(net::OK
, OpenEntry(second
, &entry
));
759 EXPECT_EQ(cache_size
/ 10, entry
->GetDataSize(0));
763 TEST_F(DiskCacheBackendTest
, SetSize
) {
767 TEST_F(DiskCacheBackendTest
, NewEvictionSetSize
) {
772 TEST_F(DiskCacheBackendTest
, MemoryOnlySetSize
) {
777 void DiskCacheBackendTest::BackendLoad() {
779 int seed
= static_cast<int>(Time::Now().ToInternalValue());
782 disk_cache::Entry
* entries
[100];
783 for (int i
= 0; i
< 100; i
++) {
784 std::string key
= GenerateKey(true);
785 ASSERT_EQ(net::OK
, CreateEntry(key
, &entries
[i
]));
787 EXPECT_EQ(100, cache_
->GetEntryCount());
789 for (int i
= 0; i
< 100; i
++) {
790 int source1
= rand() % 100;
791 int source2
= rand() % 100;
792 disk_cache::Entry
* temp
= entries
[source1
];
793 entries
[source1
] = entries
[source2
];
794 entries
[source2
] = temp
;
797 for (int i
= 0; i
< 100; i
++) {
798 disk_cache::Entry
* entry
;
799 ASSERT_EQ(net::OK
, OpenEntry(entries
[i
]->GetKey(), &entry
));
800 EXPECT_TRUE(entry
== entries
[i
]);
806 EXPECT_EQ(0, cache_
->GetEntryCount());
809 TEST_F(DiskCacheBackendTest
, Load
) {
810 // Work with a tiny index table (16 entries)
812 SetMaxSize(0x100000);
816 TEST_F(DiskCacheBackendTest
, NewEvictionLoad
) {
818 // Work with a tiny index table (16 entries)
820 SetMaxSize(0x100000);
824 TEST_F(DiskCacheBackendTest
, MemoryOnlyLoad
) {
825 SetMaxSize(0x100000);
830 TEST_F(DiskCacheBackendTest
, AppCacheLoad
) {
831 SetCacheType(net::APP_CACHE
);
832 // Work with a tiny index table (16 entries)
834 SetMaxSize(0x100000);
838 TEST_F(DiskCacheBackendTest
, ShaderCacheLoad
) {
839 SetCacheType(net::SHADER_CACHE
);
840 // Work with a tiny index table (16 entries)
842 SetMaxSize(0x100000);
846 // Tests the chaining of an entry to the current head.
847 void DiskCacheBackendTest::BackendChain() {
848 SetMask(0x1); // 2-entry table.
849 SetMaxSize(0x3000); // 12 kB.
852 disk_cache::Entry
* entry
;
853 ASSERT_EQ(net::OK
, CreateEntry("The first key", &entry
));
855 ASSERT_EQ(net::OK
, CreateEntry("The Second key", &entry
));
859 TEST_F(DiskCacheBackendTest
, Chain
) {
863 TEST_F(DiskCacheBackendTest
, NewEvictionChain
) {
868 TEST_F(DiskCacheBackendTest
, AppCacheChain
) {
869 SetCacheType(net::APP_CACHE
);
873 TEST_F(DiskCacheBackendTest
, ShaderCacheChain
) {
874 SetCacheType(net::SHADER_CACHE
);
878 TEST_F(DiskCacheBackendTest
, NewEvictionTrim
) {
882 disk_cache::Entry
* entry
;
883 for (int i
= 0; i
< 100; i
++) {
884 std::string
name(base::StringPrintf("Key %d", i
));
885 ASSERT_EQ(net::OK
, CreateEntry(name
, &entry
));
888 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
889 ASSERT_EQ(net::OK
, OpenEntry(name
, &entry
));
894 // The first eviction must come from list 1 (10% limit), the second must come
897 EXPECT_NE(net::OK
, OpenEntry("Key 0", &entry
));
899 EXPECT_NE(net::OK
, OpenEntry("Key 90", &entry
));
901 // Double check that we still have the list tails.
902 ASSERT_EQ(net::OK
, OpenEntry("Key 1", &entry
));
904 ASSERT_EQ(net::OK
, OpenEntry("Key 91", &entry
));
908 // Before looking for invalid entries, let's check a valid entry.
909 void DiskCacheBackendTest::BackendValidEntry() {
912 std::string
key("Some key");
913 disk_cache::Entry
* entry
;
914 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
916 const int kSize
= 50;
917 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
918 memset(buffer1
->data(), 0, kSize
);
919 base::strlcpy(buffer1
->data(), "And the data to save", kSize
);
920 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer1
.get(), kSize
, false));
924 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
926 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
927 memset(buffer2
->data(), 0, kSize
);
928 EXPECT_EQ(kSize
, ReadData(entry
, 0, 0, buffer2
.get(), kSize
));
930 EXPECT_STREQ(buffer1
->data(), buffer2
->data());
933 TEST_F(DiskCacheBackendTest
, ValidEntry
) {
937 TEST_F(DiskCacheBackendTest
, NewEvictionValidEntry
) {
942 // The same logic of the previous test (ValidEntry), but this time force the
943 // entry to be invalid, simulating a crash in the middle.
944 // We'll be leaking memory from this test.
945 void DiskCacheBackendTest::BackendInvalidEntry() {
948 std::string
key("Some key");
949 disk_cache::Entry
* entry
;
950 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
952 const int kSize
= 50;
953 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
954 memset(buffer
->data(), 0, kSize
);
955 base::strlcpy(buffer
->data(), "And the data to save", kSize
);
956 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
959 EXPECT_NE(net::OK
, OpenEntry(key
, &entry
));
960 EXPECT_EQ(0, cache_
->GetEntryCount());
963 #if !defined(LEAK_SANITIZER)
964 // We'll be leaking memory from this test.
965 TEST_F(DiskCacheBackendTest
, InvalidEntry
) {
966 BackendInvalidEntry();
969 // We'll be leaking memory from this test.
970 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry
) {
972 BackendInvalidEntry();
975 // We'll be leaking memory from this test.
976 TEST_F(DiskCacheBackendTest
, AppCacheInvalidEntry
) {
977 SetCacheType(net::APP_CACHE
);
978 BackendInvalidEntry();
981 // We'll be leaking memory from this test.
982 TEST_F(DiskCacheBackendTest
, ShaderCacheInvalidEntry
) {
983 SetCacheType(net::SHADER_CACHE
);
984 BackendInvalidEntry();
987 // Almost the same test, but this time crash the cache after reading an entry.
988 // We'll be leaking memory from this test.
989 void DiskCacheBackendTest::BackendInvalidEntryRead() {
992 std::string
key("Some key");
993 disk_cache::Entry
* entry
;
994 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
996 const int kSize
= 50;
997 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
998 memset(buffer
->data(), 0, kSize
);
999 base::strlcpy(buffer
->data(), "And the data to save", kSize
);
1000 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1002 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1003 EXPECT_EQ(kSize
, ReadData(entry
, 0, 0, buffer
.get(), kSize
));
1007 if (type_
== net::APP_CACHE
) {
1008 // Reading an entry and crashing should not make it dirty.
1009 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1010 EXPECT_EQ(1, cache_
->GetEntryCount());
1013 EXPECT_NE(net::OK
, OpenEntry(key
, &entry
));
1014 EXPECT_EQ(0, cache_
->GetEntryCount());
1018 // We'll be leaking memory from this test.
1019 TEST_F(DiskCacheBackendTest
, InvalidEntryRead
) {
1020 BackendInvalidEntryRead();
1023 // We'll be leaking memory from this test.
1024 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntryRead
) {
1026 BackendInvalidEntryRead();
1029 // We'll be leaking memory from this test.
1030 TEST_F(DiskCacheBackendTest
, AppCacheInvalidEntryRead
) {
1031 SetCacheType(net::APP_CACHE
);
1032 BackendInvalidEntryRead();
1035 // We'll be leaking memory from this test.
1036 TEST_F(DiskCacheBackendTest
, ShaderCacheInvalidEntryRead
) {
1037 SetCacheType(net::SHADER_CACHE
);
1038 BackendInvalidEntryRead();
1041 // We'll be leaking memory from this test.
1042 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
1043 // Work with a tiny index table (16 entries)
1045 SetMaxSize(0x100000);
1048 int seed
= static_cast<int>(Time::Now().ToInternalValue());
1051 const int kNumEntries
= 100;
1052 disk_cache::Entry
* entries
[kNumEntries
];
1053 for (int i
= 0; i
< kNumEntries
; i
++) {
1054 std::string key
= GenerateKey(true);
1055 ASSERT_EQ(net::OK
, CreateEntry(key
, &entries
[i
]));
1057 EXPECT_EQ(kNumEntries
, cache_
->GetEntryCount());
1059 for (int i
= 0; i
< kNumEntries
; i
++) {
1060 int source1
= rand() % kNumEntries
;
1061 int source2
= rand() % kNumEntries
;
1062 disk_cache::Entry
* temp
= entries
[source1
];
1063 entries
[source1
] = entries
[source2
];
1064 entries
[source2
] = temp
;
1067 std::string keys
[kNumEntries
];
1068 for (int i
= 0; i
< kNumEntries
; i
++) {
1069 keys
[i
] = entries
[i
]->GetKey();
1070 if (i
< kNumEntries
/ 2)
1071 entries
[i
]->Close();
1076 for (int i
= kNumEntries
/ 2; i
< kNumEntries
; i
++) {
1077 disk_cache::Entry
* entry
;
1078 EXPECT_NE(net::OK
, OpenEntry(keys
[i
], &entry
));
1081 for (int i
= 0; i
< kNumEntries
/ 2; i
++) {
1082 disk_cache::Entry
* entry
;
1083 ASSERT_EQ(net::OK
, OpenEntry(keys
[i
], &entry
));
1087 EXPECT_EQ(kNumEntries
/ 2, cache_
->GetEntryCount());
1090 // We'll be leaking memory from this test.
1091 TEST_F(DiskCacheBackendTest
, InvalidEntryWithLoad
) {
1092 BackendInvalidEntryWithLoad();
1095 // We'll be leaking memory from this test.
1096 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntryWithLoad
) {
1098 BackendInvalidEntryWithLoad();
1101 // We'll be leaking memory from this test.
1102 TEST_F(DiskCacheBackendTest
, AppCacheInvalidEntryWithLoad
) {
1103 SetCacheType(net::APP_CACHE
);
1104 BackendInvalidEntryWithLoad();
1107 // We'll be leaking memory from this test.
1108 TEST_F(DiskCacheBackendTest
, ShaderCacheInvalidEntryWithLoad
) {
1109 SetCacheType(net::SHADER_CACHE
);
1110 BackendInvalidEntryWithLoad();
1113 // We'll be leaking memory from this test.
1114 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1115 const int kSize
= 0x3000; // 12 kB
1116 SetMaxSize(kSize
* 10);
1119 std::string
first("some key");
1120 std::string
second("something else");
1121 disk_cache::Entry
* entry
;
1122 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
1124 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1125 memset(buffer
->data(), 0, kSize
);
1126 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1128 // Simulate a crash.
1131 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
1132 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1134 EXPECT_EQ(2, cache_
->GetEntryCount());
1136 entry
->Close(); // Trim the cache.
1137 FlushQueueForTest();
1139 // If we evicted the entry in less than 20mS, we have one entry in the cache;
1140 // if it took more than that, we posted a task and we'll delete the second
1142 base::MessageLoop::current()->RunUntilIdle();
1144 // This may be not thread-safe in general, but for now it's OK so add some
1145 // ThreadSanitizer annotations to ignore data races on cache_.
1146 // See http://crbug.com/55970
1147 ANNOTATE_IGNORE_READS_BEGIN();
1148 EXPECT_GE(1, cache_
->GetEntryCount());
1149 ANNOTATE_IGNORE_READS_END();
1151 EXPECT_NE(net::OK
, OpenEntry(first
, &entry
));
1154 // We'll be leaking memory from this test.
1155 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry
) {
1156 BackendTrimInvalidEntry();
1159 // We'll be leaking memory from this test.
1160 TEST_F(DiskCacheBackendTest
, NewEvictionTrimInvalidEntry
) {
1162 BackendTrimInvalidEntry();
1165 // We'll be leaking memory from this test.
1166 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1167 SetMask(0xf); // 16-entry table.
1169 const int kSize
= 0x3000; // 12 kB
1170 SetMaxSize(kSize
* 40);
1173 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1174 memset(buffer
->data(), 0, kSize
);
1175 disk_cache::Entry
* entry
;
1177 // Writing 32 entries to this cache chains most of them.
1178 for (int i
= 0; i
< 32; i
++) {
1179 std::string
key(base::StringPrintf("some key %d", i
));
1180 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1181 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1183 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1184 // Note that we are not closing the entries.
1187 // Simulate a crash.
1190 ASSERT_EQ(net::OK
, CreateEntry("Something else", &entry
));
1191 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1193 FlushQueueForTest();
1194 EXPECT_EQ(33, cache_
->GetEntryCount());
1197 // For the new eviction code, all corrupt entries are on the second list so
1198 // they are not going away that easy.
1199 if (new_eviction_
) {
1200 EXPECT_EQ(net::OK
, DoomAllEntries());
1203 entry
->Close(); // Trim the cache.
1204 FlushQueueForTest();
1206 // We may abort the eviction before cleaning up everything.
1207 base::MessageLoop::current()->RunUntilIdle();
1208 FlushQueueForTest();
1209 // If it's not clear enough: we may still have eviction tasks running at this
1210 // time, so the number of entries is changing while we read it.
1211 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1212 EXPECT_GE(30, cache_
->GetEntryCount());
1213 ANNOTATE_IGNORE_READS_AND_WRITES_END();
1216 // We'll be leaking memory from this test.
1217 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry2
) {
1218 BackendTrimInvalidEntry2();
1221 // We'll be leaking memory from this test.
1222 TEST_F(DiskCacheBackendTest
, NewEvictionTrimInvalidEntry2
) {
1224 BackendTrimInvalidEntry2();
1226 #endif // !defined(LEAK_SANITIZER)
1228 void DiskCacheBackendTest::BackendEnumerations() {
1230 Time initial
= Time::Now();
1232 const int kNumEntries
= 100;
1233 for (int i
= 0; i
< kNumEntries
; i
++) {
1234 std::string key
= GenerateKey(true);
1235 disk_cache::Entry
* entry
;
1236 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1239 EXPECT_EQ(kNumEntries
, cache_
->GetEntryCount());
1240 Time final
= Time::Now();
1242 disk_cache::Entry
* entry
;
1243 scoped_ptr
<TestIterator
> iter
= CreateIterator();
1245 Time last_modified
[kNumEntries
];
1246 Time last_used
[kNumEntries
];
1247 while (iter
->OpenNextEntry(&entry
) == net::OK
) {
1248 ASSERT_TRUE(NULL
!= entry
);
1249 if (count
< kNumEntries
) {
1250 last_modified
[count
] = entry
->GetLastModified();
1251 last_used
[count
] = entry
->GetLastUsed();
1252 EXPECT_TRUE(initial
<= last_modified
[count
]);
1253 EXPECT_TRUE(final
>= last_modified
[count
]);
1259 EXPECT_EQ(kNumEntries
, count
);
1261 iter
= CreateIterator();
1263 // The previous enumeration should not have changed the timestamps.
1264 while (iter
->OpenNextEntry(&entry
) == net::OK
) {
1265 ASSERT_TRUE(NULL
!= entry
);
1266 if (count
< kNumEntries
) {
1267 EXPECT_TRUE(last_modified
[count
] == entry
->GetLastModified());
1268 EXPECT_TRUE(last_used
[count
] == entry
->GetLastUsed());
1273 EXPECT_EQ(kNumEntries
, count
);
1276 TEST_F(DiskCacheBackendTest
, Enumerations
) {
1277 BackendEnumerations();
1280 TEST_F(DiskCacheBackendTest
, NewEvictionEnumerations
) {
1282 BackendEnumerations();
1285 TEST_F(DiskCacheBackendTest
, MemoryOnlyEnumerations
) {
1286 SetMemoryOnlyMode();
1287 BackendEnumerations();
1290 TEST_F(DiskCacheBackendTest
, ShaderCacheEnumerations
) {
1291 SetCacheType(net::SHADER_CACHE
);
1292 BackendEnumerations();
1295 TEST_F(DiskCacheBackendTest
, AppCacheEnumerations
) {
1296 SetCacheType(net::APP_CACHE
);
1297 BackendEnumerations();
1300 // Verifies enumerations while entries are open.
1301 void DiskCacheBackendTest::BackendEnumerations2() {
1303 const std::string
first("first");
1304 const std::string
second("second");
1305 disk_cache::Entry
*entry1
, *entry2
;
1306 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry1
));
1308 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry2
));
1310 FlushQueueForTest();
1312 // Make sure that the timestamp is not the same.
1314 ASSERT_EQ(net::OK
, OpenEntry(second
, &entry1
));
1315 scoped_ptr
<TestIterator
> iter
= CreateIterator();
1316 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry2
));
1317 EXPECT_EQ(entry2
->GetKey(), second
);
1319 // Two entries and the iterator pointing at "first".
1323 // The iterator should still be valid, so we should not crash.
1324 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry2
));
1325 EXPECT_EQ(entry2
->GetKey(), first
);
1327 iter
= CreateIterator();
1329 // Modify the oldest entry and get the newest element.
1330 ASSERT_EQ(net::OK
, OpenEntry(first
, &entry1
));
1331 EXPECT_EQ(0, WriteData(entry1
, 0, 200, NULL
, 0, false));
1332 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry2
));
1333 if (type_
== net::APP_CACHE
) {
1334 // The list is not updated.
1335 EXPECT_EQ(entry2
->GetKey(), second
);
1337 EXPECT_EQ(entry2
->GetKey(), first
);
1344 TEST_F(DiskCacheBackendTest
, Enumerations2
) {
1345 BackendEnumerations2();
1348 TEST_F(DiskCacheBackendTest
, NewEvictionEnumerations2
) {
1350 BackendEnumerations2();
1353 TEST_F(DiskCacheBackendTest
, MemoryOnlyEnumerations2
) {
1354 SetMemoryOnlyMode();
1355 BackendEnumerations2();
1358 TEST_F(DiskCacheBackendTest
, AppCacheEnumerations2
) {
1359 SetCacheType(net::APP_CACHE
);
1360 BackendEnumerations2();
1363 TEST_F(DiskCacheBackendTest
, ShaderCacheEnumerations2
) {
1364 SetCacheType(net::SHADER_CACHE
);
1365 BackendEnumerations2();
1368 // Verify that ReadData calls do not update the LRU cache
1369 // when using the SHADER_CACHE type.
1370 TEST_F(DiskCacheBackendTest
, ShaderCacheEnumerationReadData
) {
1371 SetCacheType(net::SHADER_CACHE
);
1373 const std::string
first("first");
1374 const std::string
second("second");
1375 disk_cache::Entry
*entry1
, *entry2
;
1376 const int kSize
= 50;
1377 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
1379 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry1
));
1380 memset(buffer1
->data(), 0, kSize
);
1381 base::strlcpy(buffer1
->data(), "And the data to save", kSize
);
1382 EXPECT_EQ(kSize
, WriteData(entry1
, 0, 0, buffer1
.get(), kSize
, false));
1384 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry2
));
1387 FlushQueueForTest();
1389 // Make sure that the timestamp is not the same.
1392 // Read from the last item in the LRU.
1393 EXPECT_EQ(kSize
, ReadData(entry1
, 0, 0, buffer1
.get(), kSize
));
1396 scoped_ptr
<TestIterator
> iter
= CreateIterator();
1397 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry2
));
1398 EXPECT_EQ(entry2
->GetKey(), second
);
1402 #if !defined(LEAK_SANITIZER)
1403 // Verify handling of invalid entries while doing enumerations.
1404 // We'll be leaking memory from this test.
1405 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1408 std::string
key("Some key");
1409 disk_cache::Entry
*entry
, *entry1
, *entry2
;
1410 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry1
));
1412 const int kSize
= 50;
1413 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
1414 memset(buffer1
->data(), 0, kSize
);
1415 base::strlcpy(buffer1
->data(), "And the data to save", kSize
);
1416 EXPECT_EQ(kSize
, WriteData(entry1
, 0, 0, buffer1
.get(), kSize
, false));
1418 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry1
));
1419 EXPECT_EQ(kSize
, ReadData(entry1
, 0, 0, buffer1
.get(), kSize
));
1421 std::string
key2("Another key");
1422 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry2
));
1424 ASSERT_EQ(2, cache_
->GetEntryCount());
1428 scoped_ptr
<TestIterator
> iter
= CreateIterator();
1430 while (iter
->OpenNextEntry(&entry
) == net::OK
) {
1431 ASSERT_TRUE(NULL
!= entry
);
1432 EXPECT_EQ(key2
, entry
->GetKey());
1436 EXPECT_EQ(1, count
);
1437 EXPECT_EQ(1, cache_
->GetEntryCount());
1440 // We'll be leaking memory from this test.
1441 TEST_F(DiskCacheBackendTest
, InvalidEntryEnumeration
) {
1442 BackendInvalidEntryEnumeration();
1445 // We'll be leaking memory from this test.
1446 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntryEnumeration
) {
1448 BackendInvalidEntryEnumeration();
1450 #endif // !defined(LEAK_SANITIZER)
1452 // Tests that if for some reason entries are modified close to existing cache
1453 // iterators, we don't generate fatal errors or reset the cache.
1454 void DiskCacheBackendTest::BackendFixEnumerators() {
1457 int seed
= static_cast<int>(Time::Now().ToInternalValue());
1460 const int kNumEntries
= 10;
1461 for (int i
= 0; i
< kNumEntries
; i
++) {
1462 std::string key
= GenerateKey(true);
1463 disk_cache::Entry
* entry
;
1464 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1467 EXPECT_EQ(kNumEntries
, cache_
->GetEntryCount());
1469 disk_cache::Entry
*entry1
, *entry2
;
1470 scoped_ptr
<TestIterator
> iter1
= CreateIterator(), iter2
= CreateIterator();
1471 ASSERT_EQ(net::OK
, iter1
->OpenNextEntry(&entry1
));
1472 ASSERT_TRUE(NULL
!= entry1
);
1476 // Let's go to the middle of the list.
1477 for (int i
= 0; i
< kNumEntries
/ 2; i
++) {
1480 ASSERT_EQ(net::OK
, iter1
->OpenNextEntry(&entry1
));
1481 ASSERT_TRUE(NULL
!= entry1
);
1483 ASSERT_EQ(net::OK
, iter2
->OpenNextEntry(&entry2
));
1484 ASSERT_TRUE(NULL
!= entry2
);
1488 // Messing up with entry1 will modify entry2->next.
1490 ASSERT_EQ(net::OK
, iter2
->OpenNextEntry(&entry2
));
1491 ASSERT_TRUE(NULL
!= entry2
);
1493 // The link entry2->entry1 should be broken.
1494 EXPECT_NE(entry2
->GetKey(), entry1
->GetKey());
1498 // And the second iterator should keep working.
1499 ASSERT_EQ(net::OK
, iter2
->OpenNextEntry(&entry2
));
1500 ASSERT_TRUE(NULL
!= entry2
);
1504 TEST_F(DiskCacheBackendTest
, FixEnumerators
) {
1505 BackendFixEnumerators();
1508 TEST_F(DiskCacheBackendTest
, NewEvictionFixEnumerators
) {
1510 BackendFixEnumerators();
1513 void DiskCacheBackendTest::BackendDoomRecent() {
1516 disk_cache::Entry
*entry
;
1517 ASSERT_EQ(net::OK
, CreateEntry("first", &entry
));
1519 ASSERT_EQ(net::OK
, CreateEntry("second", &entry
));
1521 FlushQueueForTest();
1524 Time middle
= Time::Now();
1526 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
1528 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry
));
1530 FlushQueueForTest();
1533 Time final
= Time::Now();
1535 ASSERT_EQ(4, cache_
->GetEntryCount());
1536 EXPECT_EQ(net::OK
, DoomEntriesSince(final
));
1537 ASSERT_EQ(4, cache_
->GetEntryCount());
1539 EXPECT_EQ(net::OK
, DoomEntriesSince(middle
));
1540 ASSERT_EQ(2, cache_
->GetEntryCount());
1542 ASSERT_EQ(net::OK
, OpenEntry("second", &entry
));
1546 TEST_F(DiskCacheBackendTest
, DoomRecent
) {
1547 BackendDoomRecent();
1550 TEST_F(DiskCacheBackendTest
, NewEvictionDoomRecent
) {
1552 BackendDoomRecent();
1555 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomRecent
) {
1556 SetMemoryOnlyMode();
1557 BackendDoomRecent();
1560 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomEntriesSinceSparse
) {
1561 SetMemoryOnlyMode();
1563 InitSparseCache(&start
, NULL
);
1564 DoomEntriesSince(start
);
1565 EXPECT_EQ(1, cache_
->GetEntryCount());
1568 TEST_F(DiskCacheBackendTest
, DoomEntriesSinceSparse
) {
1570 InitSparseCache(&start
, NULL
);
1571 DoomEntriesSince(start
);
1572 // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
1573 // MemBackendImpl does not. Thats why expected value differs here from
1574 // MemoryOnlyDoomEntriesSinceSparse.
1575 EXPECT_EQ(3, cache_
->GetEntryCount());
1578 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomAllSparse
) {
1579 SetMemoryOnlyMode();
1580 InitSparseCache(NULL
, NULL
);
1581 EXPECT_EQ(net::OK
, DoomAllEntries());
1582 EXPECT_EQ(0, cache_
->GetEntryCount());
1585 TEST_F(DiskCacheBackendTest
, DoomAllSparse
) {
1586 InitSparseCache(NULL
, NULL
);
1587 EXPECT_EQ(net::OK
, DoomAllEntries());
1588 EXPECT_EQ(0, cache_
->GetEntryCount());
1591 void DiskCacheBackendTest::BackendDoomBetween() {
1594 disk_cache::Entry
*entry
;
1595 ASSERT_EQ(net::OK
, CreateEntry("first", &entry
));
1597 FlushQueueForTest();
1600 Time middle_start
= Time::Now();
1602 ASSERT_EQ(net::OK
, CreateEntry("second", &entry
));
1604 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
1606 FlushQueueForTest();
1609 Time middle_end
= Time::Now();
1611 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry
));
1613 ASSERT_EQ(net::OK
, OpenEntry("fourth", &entry
));
1615 FlushQueueForTest();
1618 Time final
= Time::Now();
1620 ASSERT_EQ(4, cache_
->GetEntryCount());
1621 EXPECT_EQ(net::OK
, DoomEntriesBetween(middle_start
, middle_end
));
1622 ASSERT_EQ(2, cache_
->GetEntryCount());
1624 ASSERT_EQ(net::OK
, OpenEntry("fourth", &entry
));
1627 EXPECT_EQ(net::OK
, DoomEntriesBetween(middle_start
, final
));
1628 ASSERT_EQ(1, cache_
->GetEntryCount());
1630 ASSERT_EQ(net::OK
, OpenEntry("first", &entry
));
1634 TEST_F(DiskCacheBackendTest
, DoomBetween
) {
1635 BackendDoomBetween();
1638 TEST_F(DiskCacheBackendTest
, NewEvictionDoomBetween
) {
1640 BackendDoomBetween();
1643 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomBetween
) {
1644 SetMemoryOnlyMode();
1645 BackendDoomBetween();
1648 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomEntriesBetweenSparse
) {
1649 SetMemoryOnlyMode();
1650 base::Time start
, end
;
1651 InitSparseCache(&start
, &end
);
1652 DoomEntriesBetween(start
, end
);
1653 EXPECT_EQ(3, cache_
->GetEntryCount());
1656 end
= base::Time::Now();
1657 DoomEntriesBetween(start
, end
);
1658 EXPECT_EQ(1, cache_
->GetEntryCount());
1661 TEST_F(DiskCacheBackendTest
, DoomEntriesBetweenSparse
) {
1662 base::Time start
, end
;
1663 InitSparseCache(&start
, &end
);
1664 DoomEntriesBetween(start
, end
);
1665 EXPECT_EQ(9, cache_
->GetEntryCount());
1668 end
= base::Time::Now();
1669 DoomEntriesBetween(start
, end
);
1670 EXPECT_EQ(3, cache_
->GetEntryCount());
1673 void DiskCacheBackendTest::BackendTransaction(const std::string
& name
,
1674 int num_entries
, bool load
) {
1676 ASSERT_TRUE(CopyTestCache(name
));
1677 DisableFirstCleanup();
1682 SetMaxSize(0x100000);
1684 // Clear the settings from the previous run.
1691 ASSERT_EQ(num_entries
+ 1, cache_
->GetEntryCount());
1693 std::string
key("the first key");
1694 disk_cache::Entry
* entry1
;
1695 ASSERT_NE(net::OK
, OpenEntry(key
, &entry1
));
1697 int actual
= cache_
->GetEntryCount();
1698 if (num_entries
!= actual
) {
1700 // If there is a heavy load, inserting an entry will make another entry
1701 // dirty (on the hash bucket) so two entries are removed.
1702 ASSERT_EQ(num_entries
- 1, actual
);
1708 ASSERT_TRUE(CheckCacheIntegrity(cache_path_
, new_eviction_
, mask
));
1712 void DiskCacheBackendTest::BackendRecoverInsert() {
1713 // Tests with an empty cache.
1714 BackendTransaction("insert_empty1", 0, false);
1715 ASSERT_TRUE(success_
) << "insert_empty1";
1716 BackendTransaction("insert_empty2", 0, false);
1717 ASSERT_TRUE(success_
) << "insert_empty2";
1718 BackendTransaction("insert_empty3", 0, false);
1719 ASSERT_TRUE(success_
) << "insert_empty3";
1721 // Tests with one entry on the cache.
1722 BackendTransaction("insert_one1", 1, false);
1723 ASSERT_TRUE(success_
) << "insert_one1";
1724 BackendTransaction("insert_one2", 1, false);
1725 ASSERT_TRUE(success_
) << "insert_one2";
1726 BackendTransaction("insert_one3", 1, false);
1727 ASSERT_TRUE(success_
) << "insert_one3";
1729 // Tests with one hundred entries on the cache, tiny index.
1730 BackendTransaction("insert_load1", 100, true);
1731 ASSERT_TRUE(success_
) << "insert_load1";
1732 BackendTransaction("insert_load2", 100, true);
1733 ASSERT_TRUE(success_
) << "insert_load2";
1736 TEST_F(DiskCacheBackendTest
, RecoverInsert
) {
1737 BackendRecoverInsert();
1740 TEST_F(DiskCacheBackendTest
, NewEvictionRecoverInsert
) {
1742 BackendRecoverInsert();
1745 void DiskCacheBackendTest::BackendRecoverRemove() {
1746 // Removing the only element.
1747 BackendTransaction("remove_one1", 0, false);
1748 ASSERT_TRUE(success_
) << "remove_one1";
1749 BackendTransaction("remove_one2", 0, false);
1750 ASSERT_TRUE(success_
) << "remove_one2";
1751 BackendTransaction("remove_one3", 0, false);
1752 ASSERT_TRUE(success_
) << "remove_one3";
1754 // Removing the head.
1755 BackendTransaction("remove_head1", 1, false);
1756 ASSERT_TRUE(success_
) << "remove_head1";
1757 BackendTransaction("remove_head2", 1, false);
1758 ASSERT_TRUE(success_
) << "remove_head2";
1759 BackendTransaction("remove_head3", 1, false);
1760 ASSERT_TRUE(success_
) << "remove_head3";
1762 // Removing the tail.
1763 BackendTransaction("remove_tail1", 1, false);
1764 ASSERT_TRUE(success_
) << "remove_tail1";
1765 BackendTransaction("remove_tail2", 1, false);
1766 ASSERT_TRUE(success_
) << "remove_tail2";
1767 BackendTransaction("remove_tail3", 1, false);
1768 ASSERT_TRUE(success_
) << "remove_tail3";
1770 // Removing with one hundred entries on the cache, tiny index.
1771 BackendTransaction("remove_load1", 100, true);
1772 ASSERT_TRUE(success_
) << "remove_load1";
1773 BackendTransaction("remove_load2", 100, true);
1774 ASSERT_TRUE(success_
) << "remove_load2";
1775 BackendTransaction("remove_load3", 100, true);
1776 ASSERT_TRUE(success_
) << "remove_load3";
1778 // This case cannot be reverted.
1779 BackendTransaction("remove_one4", 0, false);
1780 ASSERT_TRUE(success_
) << "remove_one4";
1781 BackendTransaction("remove_head4", 1, false);
1782 ASSERT_TRUE(success_
) << "remove_head4";
1786 // http://crbug.com/396392
1787 #define MAYBE_RecoverRemove DISABLED_RecoverRemove
1789 #define MAYBE_RecoverRemove RecoverRemove
1791 TEST_F(DiskCacheBackendTest
, MAYBE_RecoverRemove
) {
1792 BackendRecoverRemove();
1796 // http://crbug.com/396392
1797 #define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
1799 #define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
1801 TEST_F(DiskCacheBackendTest
, MAYBE_NewEvictionRecoverRemove
) {
1803 BackendRecoverRemove();
1806 void DiskCacheBackendTest::BackendRecoverWithEviction() {
1808 ASSERT_TRUE(CopyTestCache("insert_load1"));
1809 DisableFirstCleanup();
1814 // We should not crash here.
1816 DisableIntegrityCheck();
1819 TEST_F(DiskCacheBackendTest
, RecoverWithEviction
) {
1820 BackendRecoverWithEviction();
1823 TEST_F(DiskCacheBackendTest
, NewEvictionRecoverWithEviction
) {
1825 BackendRecoverWithEviction();
1828 // Tests that the |BackendImpl| fails to start with the wrong cache version.
1829 TEST_F(DiskCacheTest
, WrongVersion
) {
1830 ASSERT_TRUE(CopyTestCache("wrong_version"));
1831 base::Thread
cache_thread("CacheThread");
1832 ASSERT_TRUE(cache_thread
.StartWithOptions(
1833 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1834 net::TestCompletionCallback cb
;
1836 scoped_ptr
<disk_cache::BackendImpl
> cache(new disk_cache::BackendImpl(
1837 cache_path_
, cache_thread
.task_runner(), NULL
));
1838 int rv
= cache
->Init(cb
.callback());
1839 ASSERT_EQ(net::ERR_FAILED
, cb
.GetResult(rv
));
1842 class BadEntropyProvider
: public base::FieldTrial::EntropyProvider
{
1844 ~BadEntropyProvider() override
{}
1846 double GetEntropyForTrial(const std::string
& trial_name
,
1847 uint32 randomization_seed
) const override
{
1852 // Tests that the disk cache successfully joins the control group, dropping the
1853 // existing cache in favour of a new empty cache.
1854 // Disabled on android since this test requires cache creator to create
1855 // blockfile caches.
1856 #if !defined(OS_ANDROID)
1857 TEST_F(DiskCacheTest
, SimpleCacheControlJoin
) {
1858 base::Thread
cache_thread("CacheThread");
1859 ASSERT_TRUE(cache_thread
.StartWithOptions(
1860 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1862 scoped_ptr
<disk_cache::BackendImpl
> cache
=
1863 CreateExistingEntryCache(cache_thread
, cache_path_
);
1864 ASSERT_TRUE(cache
.get());
1867 // Instantiate the SimpleCacheTrial, forcing this run into the
1868 // ExperimentControl group.
1869 base::FieldTrialList
field_trial_list(new BadEntropyProvider());
1870 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1871 "ExperimentControl");
1872 net::TestCompletionCallback cb
;
1873 scoped_ptr
<disk_cache::Backend
> base_cache
;
1874 int rv
= disk_cache::CreateCacheBackend(net::DISK_CACHE
,
1875 net::CACHE_BACKEND_BLOCKFILE
,
1879 cache_thread
.task_runner(),
1883 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
1884 EXPECT_EQ(0, base_cache
->GetEntryCount());
1888 // Tests that the disk cache can restart in the control group preserving
1889 // existing entries.
1890 TEST_F(DiskCacheTest
, SimpleCacheControlRestart
) {
1891 // Instantiate the SimpleCacheTrial, forcing this run into the
1892 // ExperimentControl group.
1893 base::FieldTrialList
field_trial_list(new BadEntropyProvider());
1894 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1895 "ExperimentControl");
1897 base::Thread
cache_thread("CacheThread");
1898 ASSERT_TRUE(cache_thread
.StartWithOptions(
1899 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1901 scoped_ptr
<disk_cache::BackendImpl
> cache
=
1902 CreateExistingEntryCache(cache_thread
, cache_path_
);
1903 ASSERT_TRUE(cache
.get());
1905 net::TestCompletionCallback cb
;
1907 const int kRestartCount
= 5;
1908 for (int i
= 0; i
< kRestartCount
; ++i
) {
1909 cache
.reset(new disk_cache::BackendImpl(
1910 cache_path_
, cache_thread
.message_loop_proxy(), NULL
));
1911 int rv
= cache
->Init(cb
.callback());
1912 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
1913 EXPECT_EQ(1, cache
->GetEntryCount());
1915 disk_cache::Entry
* entry
= NULL
;
1916 rv
= cache
->OpenEntry(kExistingEntryKey
, &entry
, cb
.callback());
1917 EXPECT_EQ(net::OK
, cb
.GetResult(rv
));
1923 // Tests that the disk cache can leave the control group preserving existing
1925 TEST_F(DiskCacheTest
, SimpleCacheControlLeave
) {
1926 base::Thread
cache_thread("CacheThread");
1927 ASSERT_TRUE(cache_thread
.StartWithOptions(
1928 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1931 // Instantiate the SimpleCacheTrial, forcing this run into the
1932 // ExperimentControl group.
1933 base::FieldTrialList
field_trial_list(new BadEntropyProvider());
1934 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1935 "ExperimentControl");
1937 scoped_ptr
<disk_cache::BackendImpl
> cache
=
1938 CreateExistingEntryCache(cache_thread
, cache_path_
);
1939 ASSERT_TRUE(cache
.get());
1942 // Instantiate the SimpleCacheTrial, forcing this run into the
1943 // ExperimentNo group.
1944 base::FieldTrialList
field_trial_list(new BadEntropyProvider());
1945 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1946 net::TestCompletionCallback cb
;
1948 const int kRestartCount
= 5;
1949 for (int i
= 0; i
< kRestartCount
; ++i
) {
1950 scoped_ptr
<disk_cache::BackendImpl
> cache(new disk_cache::BackendImpl(
1951 cache_path_
, cache_thread
.message_loop_proxy(), NULL
));
1952 int rv
= cache
->Init(cb
.callback());
1953 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
1954 EXPECT_EQ(1, cache
->GetEntryCount());
1956 disk_cache::Entry
* entry
= NULL
;
1957 rv
= cache
->OpenEntry(kExistingEntryKey
, &entry
, cb
.callback());
1958 EXPECT_EQ(net::OK
, cb
.GetResult(rv
));
1964 // Tests that the cache is properly restarted on recovery error.
1965 // Disabled on android since this test requires cache creator to create
1966 // blockfile caches.
1967 #if !defined(OS_ANDROID)
1968 TEST_F(DiskCacheBackendTest
, DeleteOld
) {
1969 ASSERT_TRUE(CopyTestCache("wrong_version"));
1971 base::Thread
cache_thread("CacheThread");
1972 ASSERT_TRUE(cache_thread
.StartWithOptions(
1973 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1975 net::TestCompletionCallback cb
;
1976 bool prev
= base::ThreadRestrictions::SetIOAllowed(false);
1977 base::FilePath
path(cache_path_
);
1978 int rv
= disk_cache::CreateCacheBackend(net::DISK_CACHE
,
1979 net::CACHE_BACKEND_BLOCKFILE
,
1983 cache_thread
.task_runner(),
1987 path
.clear(); // Make sure path was captured by the previous call.
1988 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
1989 base::ThreadRestrictions::SetIOAllowed(prev
);
1991 EXPECT_TRUE(CheckCacheIntegrity(cache_path_
, new_eviction_
, mask_
));
1995 // We want to be able to deal with messed up entries on disk.
1996 void DiskCacheBackendTest::BackendInvalidEntry2() {
1997 ASSERT_TRUE(CopyTestCache("bad_entry"));
1998 DisableFirstCleanup();
2001 disk_cache::Entry
*entry1
, *entry2
;
2002 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry1
));
2003 EXPECT_NE(net::OK
, OpenEntry("some other key", &entry2
));
2006 // CheckCacheIntegrity will fail at this point.
2007 DisableIntegrityCheck();
2010 TEST_F(DiskCacheBackendTest
, InvalidEntry2
) {
2011 BackendInvalidEntry2();
2014 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry2
) {
2016 BackendInvalidEntry2();
2019 // Tests that we don't crash or hang when enumerating this cache.
2020 void DiskCacheBackendTest::BackendInvalidEntry3() {
2021 SetMask(0x1); // 2-entry table.
2022 SetMaxSize(0x3000); // 12 kB.
2023 DisableFirstCleanup();
2026 disk_cache::Entry
* entry
;
2027 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2028 while (iter
->OpenNextEntry(&entry
) == net::OK
) {
2033 TEST_F(DiskCacheBackendTest
, InvalidEntry3
) {
2034 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2035 BackendInvalidEntry3();
2038 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry3
) {
2039 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2041 BackendInvalidEntry3();
2042 DisableIntegrityCheck();
2045 // Test that we handle a dirty entry on the LRU list, already replaced with
2046 // the same key, and with hash collisions.
2047 TEST_F(DiskCacheBackendTest
, InvalidEntry4
) {
2048 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2049 SetMask(0x1); // 2-entry table.
2050 SetMaxSize(0x3000); // 12 kB.
2051 DisableFirstCleanup();
2057 // Test that we handle a dirty entry on the deleted list, already replaced with
2058 // the same key, and with hash collisions.
2059 TEST_F(DiskCacheBackendTest
, InvalidEntry5
) {
2060 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2062 SetMask(0x1); // 2-entry table.
2063 SetMaxSize(0x3000); // 12 kB.
2064 DisableFirstCleanup();
2067 TrimDeletedListForTest(false);
2070 TEST_F(DiskCacheBackendTest
, InvalidEntry6
) {
2071 ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2072 SetMask(0x1); // 2-entry table.
2073 SetMaxSize(0x3000); // 12 kB.
2074 DisableFirstCleanup();
2077 // There is a dirty entry (but marked as clean) at the end, pointing to a
2078 // deleted entry through the hash collision list. We should not re-insert the
2079 // deleted entry into the index table.
2082 // The cache should be clean (as detected by CheckCacheIntegrity).
2085 // Tests that we don't hang when there is a loop on the hash collision list.
2086 // The test cache could be a result of bug 69135.
2087 TEST_F(DiskCacheBackendTest
, BadNextEntry1
) {
2088 ASSERT_TRUE(CopyTestCache("list_loop2"));
2089 SetMask(0x1); // 2-entry table.
2090 SetMaxSize(0x3000); // 12 kB.
2091 DisableFirstCleanup();
2094 // The second entry points at itselft, and the first entry is not accessible
2095 // though the index, but it is at the head of the LRU.
2097 disk_cache::Entry
* entry
;
2098 ASSERT_EQ(net::OK
, CreateEntry("The first key", &entry
));
2103 ASSERT_EQ(net::OK
, OpenEntry("The first key", &entry
));
2105 EXPECT_EQ(1, cache_
->GetEntryCount());
2108 // Tests that we don't hang when there is a loop on the hash collision list.
2109 // The test cache could be a result of bug 69135.
2110 TEST_F(DiskCacheBackendTest
, BadNextEntry2
) {
2111 ASSERT_TRUE(CopyTestCache("list_loop3"));
2112 SetMask(0x1); // 2-entry table.
2113 SetMaxSize(0x3000); // 12 kB.
2114 DisableFirstCleanup();
2117 // There is a wide loop of 5 entries.
2119 disk_cache::Entry
* entry
;
2120 ASSERT_NE(net::OK
, OpenEntry("Not present key", &entry
));
2123 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry6
) {
2124 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2125 DisableFirstCleanup();
2129 // The second entry is dirty, but removing it should not corrupt the list.
2130 disk_cache::Entry
* entry
;
2131 ASSERT_NE(net::OK
, OpenEntry("the second key", &entry
));
2132 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry
));
2134 // This should not delete the cache.
2136 FlushQueueForTest();
2139 ASSERT_EQ(net::OK
, OpenEntry("some other key", &entry
));
2143 // Tests handling of corrupt entries by keeping the rankings node around, with
2145 void DiskCacheBackendTest::BackendInvalidEntry7() {
2146 const int kSize
= 0x3000; // 12 kB.
2147 SetMaxSize(kSize
* 10);
2150 std::string
first("some key");
2151 std::string
second("something else");
2152 disk_cache::Entry
* entry
;
2153 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2155 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2157 // Corrupt this entry.
2158 disk_cache::EntryImpl
* entry_impl
=
2159 static_cast<disk_cache::EntryImpl
*>(entry
);
2161 entry_impl
->rankings()->Data()->next
= 0;
2162 entry_impl
->rankings()->Store();
2164 FlushQueueForTest();
2165 EXPECT_EQ(2, cache_
->GetEntryCount());
2167 // This should detect the bad entry.
2168 EXPECT_NE(net::OK
, OpenEntry(second
, &entry
));
2169 EXPECT_EQ(1, cache_
->GetEntryCount());
2171 // We should delete the cache. The list still has a corrupt node.
2172 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2173 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry
));
2174 FlushQueueForTest();
2175 EXPECT_EQ(0, cache_
->GetEntryCount());
2178 TEST_F(DiskCacheBackendTest
, InvalidEntry7
) {
2179 BackendInvalidEntry7();
2182 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry7
) {
2184 BackendInvalidEntry7();
2187 // Tests handling of corrupt entries by keeping the rankings node around, with
2188 // a non fatal failure.
2189 void DiskCacheBackendTest::BackendInvalidEntry8() {
2190 const int kSize
= 0x3000; // 12 kB
2191 SetMaxSize(kSize
* 10);
2194 std::string
first("some key");
2195 std::string
second("something else");
2196 disk_cache::Entry
* entry
;
2197 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2199 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2201 // Corrupt this entry.
2202 disk_cache::EntryImpl
* entry_impl
=
2203 static_cast<disk_cache::EntryImpl
*>(entry
);
2205 entry_impl
->rankings()->Data()->contents
= 0;
2206 entry_impl
->rankings()->Store();
2208 FlushQueueForTest();
2209 EXPECT_EQ(2, cache_
->GetEntryCount());
2211 // This should detect the bad entry.
2212 EXPECT_NE(net::OK
, OpenEntry(second
, &entry
));
2213 EXPECT_EQ(1, cache_
->GetEntryCount());
2215 // We should not delete the cache.
2216 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2217 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry
));
2219 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry
));
2220 EXPECT_EQ(1, cache_
->GetEntryCount());
2223 TEST_F(DiskCacheBackendTest
, InvalidEntry8
) {
2224 BackendInvalidEntry8();
2227 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry8
) {
2229 BackendInvalidEntry8();
2232 // Tests handling of corrupt entries detected by enumerations. Note that these
2233 // tests (xx9 to xx11) are basically just going though slightly different
2234 // codepaths so they are tighlty coupled with the code, but that is better than
2235 // not testing error handling code.
2236 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction
) {
2237 const int kSize
= 0x3000; // 12 kB.
2238 SetMaxSize(kSize
* 10);
2241 std::string
first("some key");
2242 std::string
second("something else");
2243 disk_cache::Entry
* entry
;
2244 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2246 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2248 // Corrupt this entry.
2249 disk_cache::EntryImpl
* entry_impl
=
2250 static_cast<disk_cache::EntryImpl
*>(entry
);
2252 entry_impl
->entry()->Data()->state
= 0xbad;
2253 entry_impl
->entry()->Store();
2255 FlushQueueForTest();
2256 EXPECT_EQ(2, cache_
->GetEntryCount());
2260 EXPECT_EQ(1, cache_
->GetEntryCount());
2262 EXPECT_EQ(1, cache_
->GetEntryCount());
2264 // We should detect the problem through the list, but we should not delete
2265 // the entry, just fail the iteration.
2266 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2267 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry
));
2269 // Now a full iteration will work, and return one entry.
2270 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry
));
2272 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry
));
2274 // This should detect what's left of the bad entry.
2275 EXPECT_NE(net::OK
, OpenEntry(second
, &entry
));
2276 EXPECT_EQ(2, cache_
->GetEntryCount());
2278 DisableIntegrityCheck();
2281 TEST_F(DiskCacheBackendTest
, InvalidEntry9
) {
2282 BackendInvalidEntry9(false);
2285 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry9
) {
2287 BackendInvalidEntry9(false);
2290 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry9
) {
2291 BackendInvalidEntry9(true);
2294 TEST_F(DiskCacheBackendTest
, NewEvictionTrimInvalidEntry9
) {
2296 BackendInvalidEntry9(true);
2299 // Tests handling of corrupt entries detected by enumerations.
2300 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction
) {
2301 const int kSize
= 0x3000; // 12 kB.
2302 SetMaxSize(kSize
* 10);
2306 std::string
first("some key");
2307 std::string
second("something else");
2308 disk_cache::Entry
* entry
;
2309 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2311 ASSERT_EQ(net::OK
, OpenEntry(first
, &entry
));
2312 EXPECT_EQ(0, WriteData(entry
, 0, 200, NULL
, 0, false));
2314 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2316 // Corrupt this entry.
2317 disk_cache::EntryImpl
* entry_impl
=
2318 static_cast<disk_cache::EntryImpl
*>(entry
);
2320 entry_impl
->entry()->Data()->state
= 0xbad;
2321 entry_impl
->entry()->Store();
2323 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
2325 EXPECT_EQ(3, cache_
->GetEntryCount());
2328 // List 0: third -> second (bad).
2332 // Detection order: second -> first -> third.
2334 EXPECT_EQ(3, cache_
->GetEntryCount());
2336 EXPECT_EQ(2, cache_
->GetEntryCount());
2338 EXPECT_EQ(1, cache_
->GetEntryCount());
2340 // Detection order: third -> second -> first.
2341 // We should detect the problem through the list, but we should not delete
2343 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2344 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry
));
2346 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry
));
2347 EXPECT_EQ(first
, entry
->GetKey());
2349 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry
));
2351 DisableIntegrityCheck();
2354 TEST_F(DiskCacheBackendTest
, InvalidEntry10
) {
2355 BackendInvalidEntry10(false);
2358 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry10
) {
2359 BackendInvalidEntry10(true);
2362 // Tests handling of corrupt entries detected by enumerations.
2363 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction
) {
2364 const int kSize
= 0x3000; // 12 kB.
2365 SetMaxSize(kSize
* 10);
2369 std::string
first("some key");
2370 std::string
second("something else");
2371 disk_cache::Entry
* entry
;
2372 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2374 ASSERT_EQ(net::OK
, OpenEntry(first
, &entry
));
2375 EXPECT_EQ(0, WriteData(entry
, 0, 200, NULL
, 0, false));
2377 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2379 ASSERT_EQ(net::OK
, OpenEntry(second
, &entry
));
2380 EXPECT_EQ(0, WriteData(entry
, 0, 200, NULL
, 0, false));
2382 // Corrupt this entry.
2383 disk_cache::EntryImpl
* entry_impl
=
2384 static_cast<disk_cache::EntryImpl
*>(entry
);
2386 entry_impl
->entry()->Data()->state
= 0xbad;
2387 entry_impl
->entry()->Store();
2389 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
2391 FlushQueueForTest();
2392 EXPECT_EQ(3, cache_
->GetEntryCount());
2396 // List 1: second (bad) -> first.
2399 // Detection order: third -> first -> second.
2401 EXPECT_EQ(2, cache_
->GetEntryCount());
2403 EXPECT_EQ(1, cache_
->GetEntryCount());
2405 EXPECT_EQ(1, cache_
->GetEntryCount());
2407 // Detection order: third -> second.
2408 // We should detect the problem through the list, but we should not delete
2409 // the entry, just fail the iteration.
2410 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2411 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry
));
2413 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry
));
2415 // Now a full iteration will work, and return two entries.
2416 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry
));
2418 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry
));
2420 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry
));
2422 DisableIntegrityCheck();
2425 TEST_F(DiskCacheBackendTest
, InvalidEntry11
) {
2426 BackendInvalidEntry11(false);
2429 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry11
) {
2430 BackendInvalidEntry11(true);
2433 // Tests handling of corrupt entries in the middle of a long eviction run.
2434 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2435 const int kSize
= 0x3000; // 12 kB
2436 SetMaxSize(kSize
* 10);
2439 std::string
first("some key");
2440 std::string
second("something else");
2441 disk_cache::Entry
* entry
;
2442 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2444 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2446 // Corrupt this entry.
2447 disk_cache::EntryImpl
* entry_impl
=
2448 static_cast<disk_cache::EntryImpl
*>(entry
);
2450 entry_impl
->entry()->Data()->state
= 0xbad;
2451 entry_impl
->entry()->Store();
2453 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
2455 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry
));
2457 EXPECT_EQ(1, cache_
->GetEntryCount());
2459 DisableIntegrityCheck();
2462 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry12
) {
2463 BackendTrimInvalidEntry12();
2466 TEST_F(DiskCacheBackendTest
, NewEvictionTrimInvalidEntry12
) {
2468 BackendTrimInvalidEntry12();
2471 // We want to be able to deal with messed up entries on disk.
2472 void DiskCacheBackendTest::BackendInvalidRankings2() {
2473 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2474 DisableFirstCleanup();
2477 disk_cache::Entry
*entry1
, *entry2
;
2478 EXPECT_NE(net::OK
, OpenEntry("the first key", &entry1
));
2479 ASSERT_EQ(net::OK
, OpenEntry("some other key", &entry2
));
2482 // CheckCacheIntegrity will fail at this point.
2483 DisableIntegrityCheck();
2486 TEST_F(DiskCacheBackendTest
, InvalidRankings2
) {
2487 BackendInvalidRankings2();
2490 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidRankings2
) {
2492 BackendInvalidRankings2();
2495 // If the LRU is corrupt, we delete the cache.
2496 void DiskCacheBackendTest::BackendInvalidRankings() {
2497 disk_cache::Entry
* entry
;
2498 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2499 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry
));
2501 EXPECT_EQ(2, cache_
->GetEntryCount());
2503 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry
));
2504 FlushQueueForTest(); // Allow the restart to finish.
2505 EXPECT_EQ(0, cache_
->GetEntryCount());
2508 TEST_F(DiskCacheBackendTest
, InvalidRankingsSuccess
) {
2509 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2510 DisableFirstCleanup();
2512 BackendInvalidRankings();
2515 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidRankingsSuccess
) {
2516 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2517 DisableFirstCleanup();
2520 BackendInvalidRankings();
2523 TEST_F(DiskCacheBackendTest
, InvalidRankingsFailure
) {
2524 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2525 DisableFirstCleanup();
2527 SetTestMode(); // Fail cache reinitialization.
2528 BackendInvalidRankings();
2531 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidRankingsFailure
) {
2532 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2533 DisableFirstCleanup();
2536 SetTestMode(); // Fail cache reinitialization.
2537 BackendInvalidRankings();
2540 // If the LRU is corrupt and we have open entries, we disable the cache.
2541 void DiskCacheBackendTest::BackendDisable() {
2542 disk_cache::Entry
*entry1
, *entry2
;
2543 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2544 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry1
));
2546 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry2
));
2547 EXPECT_EQ(0, cache_
->GetEntryCount());
2548 EXPECT_NE(net::OK
, CreateEntry("Something new", &entry2
));
2551 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2552 FlushQueueForTest(); // This one actually allows that task to complete.
2554 EXPECT_EQ(0, cache_
->GetEntryCount());
2557 TEST_F(DiskCacheBackendTest
, DisableSuccess
) {
2558 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2559 DisableFirstCleanup();
2564 TEST_F(DiskCacheBackendTest
, NewEvictionDisableSuccess
) {
2565 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2566 DisableFirstCleanup();
2572 TEST_F(DiskCacheBackendTest
, DisableFailure
) {
2573 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2574 DisableFirstCleanup();
2576 SetTestMode(); // Fail cache reinitialization.
2580 TEST_F(DiskCacheBackendTest
, NewEvictionDisableFailure
) {
2581 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2582 DisableFirstCleanup();
2585 SetTestMode(); // Fail cache reinitialization.
2589 // This is another type of corruption on the LRU; disable the cache.
2590 void DiskCacheBackendTest::BackendDisable2() {
2591 EXPECT_EQ(8, cache_
->GetEntryCount());
2593 disk_cache::Entry
* entry
;
2594 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2596 while (iter
->OpenNextEntry(&entry
) == net::OK
) {
2597 ASSERT_TRUE(NULL
!= entry
);
2600 ASSERT_LT(count
, 9);
2603 FlushQueueForTest();
2604 EXPECT_EQ(0, cache_
->GetEntryCount());
2607 TEST_F(DiskCacheBackendTest
, DisableSuccess2
) {
2608 ASSERT_TRUE(CopyTestCache("list_loop"));
2609 DisableFirstCleanup();
2614 TEST_F(DiskCacheBackendTest
, NewEvictionDisableSuccess2
) {
2615 ASSERT_TRUE(CopyTestCache("list_loop"));
2616 DisableFirstCleanup();
2622 TEST_F(DiskCacheBackendTest
, DisableFailure2
) {
2623 ASSERT_TRUE(CopyTestCache("list_loop"));
2624 DisableFirstCleanup();
2626 SetTestMode(); // Fail cache reinitialization.
2630 TEST_F(DiskCacheBackendTest
, NewEvictionDisableFailure2
) {
2631 ASSERT_TRUE(CopyTestCache("list_loop"));
2632 DisableFirstCleanup();
2635 SetTestMode(); // Fail cache reinitialization.
2639 // If the index size changes when we disable the cache, we should not crash.
2640 void DiskCacheBackendTest::BackendDisable3() {
2641 disk_cache::Entry
*entry1
, *entry2
;
2642 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2643 EXPECT_EQ(2, cache_
->GetEntryCount());
2644 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry1
));
2647 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry2
));
2648 FlushQueueForTest();
2650 ASSERT_EQ(net::OK
, CreateEntry("Something new", &entry2
));
2653 EXPECT_EQ(1, cache_
->GetEntryCount());
2656 TEST_F(DiskCacheBackendTest
, DisableSuccess3
) {
2657 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2658 DisableFirstCleanup();
2659 SetMaxSize(20 * 1024 * 1024);
2664 TEST_F(DiskCacheBackendTest
, NewEvictionDisableSuccess3
) {
2665 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2666 DisableFirstCleanup();
2667 SetMaxSize(20 * 1024 * 1024);
2673 // If we disable the cache, already open entries should work as far as possible.
2674 void DiskCacheBackendTest::BackendDisable4() {
2675 disk_cache::Entry
*entry1
, *entry2
, *entry3
, *entry4
;
2676 scoped_ptr
<TestIterator
> iter
= CreateIterator();
2677 ASSERT_EQ(net::OK
, iter
->OpenNextEntry(&entry1
));
2681 CacheTestFillBuffer(key2
, sizeof(key2
), true);
2682 CacheTestFillBuffer(key3
, sizeof(key3
), true);
2683 key2
[sizeof(key2
) - 1] = '\0';
2684 key3
[sizeof(key3
) - 1] = '\0';
2685 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry2
));
2686 ASSERT_EQ(net::OK
, CreateEntry(key3
, &entry3
));
2688 const int kBufSize
= 20000;
2689 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kBufSize
));
2690 memset(buf
->data(), 0, kBufSize
);
2691 EXPECT_EQ(100, WriteData(entry2
, 0, 0, buf
.get(), 100, false));
2692 EXPECT_EQ(kBufSize
, WriteData(entry3
, 0, 0, buf
.get(), kBufSize
, false));
2694 // This line should disable the cache but not delete it.
2695 EXPECT_NE(net::OK
, iter
->OpenNextEntry(&entry4
));
2696 EXPECT_EQ(0, cache_
->GetEntryCount());
2698 EXPECT_NE(net::OK
, CreateEntry("cache is disabled", &entry4
));
2700 EXPECT_EQ(100, ReadData(entry2
, 0, 0, buf
.get(), 100));
2701 EXPECT_EQ(100, WriteData(entry2
, 0, 0, buf
.get(), 100, false));
2702 EXPECT_EQ(100, WriteData(entry2
, 1, 0, buf
.get(), 100, false));
2704 EXPECT_EQ(kBufSize
, ReadData(entry3
, 0, 0, buf
.get(), kBufSize
));
2705 EXPECT_EQ(kBufSize
, WriteData(entry3
, 0, 0, buf
.get(), kBufSize
, false));
2706 EXPECT_EQ(kBufSize
, WriteData(entry3
, 1, 0, buf
.get(), kBufSize
, false));
2708 std::string key
= entry2
->GetKey();
2709 EXPECT_EQ(sizeof(key2
) - 1, key
.size());
2710 key
= entry3
->GetKey();
2711 EXPECT_EQ(sizeof(key3
) - 1, key
.size());
2716 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2717 FlushQueueForTest(); // This one actually allows that task to complete.
2719 EXPECT_EQ(0, cache_
->GetEntryCount());
2722 TEST_F(DiskCacheBackendTest
, DisableSuccess4
) {
2723 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2724 DisableFirstCleanup();
2729 TEST_F(DiskCacheBackendTest
, NewEvictionDisableSuccess4
) {
2730 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2731 DisableFirstCleanup();
2737 TEST_F(DiskCacheTest
, Backend_UsageStatsTimer
) {
2738 MessageLoopHelper helper
;
2740 ASSERT_TRUE(CleanupCacheDir());
2741 scoped_ptr
<disk_cache::BackendImpl
> cache
;
2742 cache
.reset(new disk_cache::BackendImpl(
2743 cache_path_
, base::ThreadTaskRunnerHandle::Get(), NULL
));
2744 ASSERT_TRUE(NULL
!= cache
.get());
2745 cache
->SetUnitTestMode();
2746 ASSERT_EQ(net::OK
, cache
->SyncInit());
2748 // Wait for a callback that never comes... about 2 secs :). The message loop
2749 // has to run to allow invocation of the usage timer.
2750 helper
.WaitUntilCacheIoFinished(1);
2753 TEST_F(DiskCacheBackendTest
, TimerNotCreated
) {
2754 ASSERT_TRUE(CopyTestCache("wrong_version"));
2756 scoped_ptr
<disk_cache::BackendImpl
> cache
;
2757 cache
.reset(new disk_cache::BackendImpl(
2758 cache_path_
, base::ThreadTaskRunnerHandle::Get(), NULL
));
2759 ASSERT_TRUE(NULL
!= cache
.get());
2760 cache
->SetUnitTestMode();
2761 ASSERT_NE(net::OK
, cache
->SyncInit());
2763 ASSERT_TRUE(NULL
== cache
->GetTimerForTest());
2765 DisableIntegrityCheck();
2768 TEST_F(DiskCacheBackendTest
, Backend_UsageStats
) {
2770 disk_cache::Entry
* entry
;
2771 ASSERT_EQ(net::OK
, CreateEntry("key", &entry
));
2773 FlushQueueForTest();
2775 disk_cache::StatsItems stats
;
2776 cache_
->GetStats(&stats
);
2777 EXPECT_FALSE(stats
.empty());
2779 disk_cache::StatsItems::value_type
hits("Create hit", "0x1");
2780 EXPECT_EQ(1, std::count(stats
.begin(), stats
.end(), hits
));
2784 // Now open the cache and verify that the stats are still there.
2785 DisableFirstCleanup();
2787 EXPECT_EQ(1, cache_
->GetEntryCount());
2790 cache_
->GetStats(&stats
);
2791 EXPECT_FALSE(stats
.empty());
2793 EXPECT_EQ(1, std::count(stats
.begin(), stats
.end(), hits
));
2796 void DiskCacheBackendTest::BackendDoomAll() {
2799 disk_cache::Entry
*entry1
, *entry2
;
2800 ASSERT_EQ(net::OK
, CreateEntry("first", &entry1
));
2801 ASSERT_EQ(net::OK
, CreateEntry("second", &entry2
));
2805 ASSERT_EQ(net::OK
, CreateEntry("third", &entry1
));
2806 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry2
));
2808 ASSERT_EQ(4, cache_
->GetEntryCount());
2809 EXPECT_EQ(net::OK
, DoomAllEntries());
2810 ASSERT_EQ(0, cache_
->GetEntryCount());
2812 // We should stop posting tasks at some point (if we post any).
2813 base::MessageLoop::current()->RunUntilIdle();
2815 disk_cache::Entry
*entry3
, *entry4
;
2816 EXPECT_NE(net::OK
, OpenEntry("third", &entry3
));
2817 ASSERT_EQ(net::OK
, CreateEntry("third", &entry3
));
2818 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry4
));
2820 EXPECT_EQ(net::OK
, DoomAllEntries());
2821 ASSERT_EQ(0, cache_
->GetEntryCount());
2825 entry3
->Doom(); // The entry should be already doomed, but this must work.
2829 // Now try with all references released.
2830 ASSERT_EQ(net::OK
, CreateEntry("third", &entry1
));
2831 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry2
));
2835 ASSERT_EQ(2, cache_
->GetEntryCount());
2836 EXPECT_EQ(net::OK
, DoomAllEntries());
2837 ASSERT_EQ(0, cache_
->GetEntryCount());
2839 EXPECT_EQ(net::OK
, DoomAllEntries());
2842 TEST_F(DiskCacheBackendTest
, DoomAll
) {
2846 TEST_F(DiskCacheBackendTest
, NewEvictionDoomAll
) {
2851 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomAll
) {
2852 SetMemoryOnlyMode();
2856 TEST_F(DiskCacheBackendTest
, AppCacheOnlyDoomAll
) {
2857 SetCacheType(net::APP_CACHE
);
2861 TEST_F(DiskCacheBackendTest
, ShaderCacheOnlyDoomAll
) {
2862 SetCacheType(net::SHADER_CACHE
);
2866 // If the index size changes when we doom the cache, we should not crash.
2867 void DiskCacheBackendTest::BackendDoomAll2() {
2868 EXPECT_EQ(2, cache_
->GetEntryCount());
2869 EXPECT_EQ(net::OK
, DoomAllEntries());
2871 disk_cache::Entry
* entry
;
2872 ASSERT_EQ(net::OK
, CreateEntry("Something new", &entry
));
2875 EXPECT_EQ(1, cache_
->GetEntryCount());
2878 TEST_F(DiskCacheBackendTest
, DoomAll2
) {
2879 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2880 DisableFirstCleanup();
2881 SetMaxSize(20 * 1024 * 1024);
2886 TEST_F(DiskCacheBackendTest
, NewEvictionDoomAll2
) {
2887 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2888 DisableFirstCleanup();
2889 SetMaxSize(20 * 1024 * 1024);
2895 // We should be able to create the same entry on multiple simultaneous instances
2897 TEST_F(DiskCacheTest
, MultipleInstances
) {
2898 base::ScopedTempDir store1
, store2
;
2899 ASSERT_TRUE(store1
.CreateUniqueTempDir());
2900 ASSERT_TRUE(store2
.CreateUniqueTempDir());
2902 base::Thread
cache_thread("CacheThread");
2903 ASSERT_TRUE(cache_thread
.StartWithOptions(
2904 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
2905 net::TestCompletionCallback cb
;
2907 const int kNumberOfCaches
= 2;
2908 scoped_ptr
<disk_cache::Backend
> cache
[kNumberOfCaches
];
2910 int rv
= disk_cache::CreateCacheBackend(net::DISK_CACHE
,
2911 net::CACHE_BACKEND_DEFAULT
,
2915 cache_thread
.task_runner(),
2919 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
2920 rv
= disk_cache::CreateCacheBackend(net::MEDIA_CACHE
,
2921 net::CACHE_BACKEND_DEFAULT
,
2925 cache_thread
.task_runner(),
2929 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
2931 ASSERT_TRUE(cache
[0].get() != NULL
&& cache
[1].get() != NULL
);
2933 std::string
key("the first key");
2934 disk_cache::Entry
* entry
;
2935 for (int i
= 0; i
< kNumberOfCaches
; i
++) {
2936 rv
= cache
[i
]->CreateEntry(key
, &entry
, cb
.callback());
2937 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
2942 // Test the six regions of the curve that determines the max cache size.
2943 TEST_F(DiskCacheTest
, AutomaticMaxSize
) {
2944 using disk_cache::kDefaultCacheSize
;
2945 int64 large_size
= kDefaultCacheSize
;
2947 // Region 1: expected = available * 0.8
2948 EXPECT_EQ((kDefaultCacheSize
- 1) * 8 / 10,
2949 disk_cache::PreferredCacheSize(large_size
- 1));
2950 EXPECT_EQ(kDefaultCacheSize
* 8 / 10,
2951 disk_cache::PreferredCacheSize(large_size
));
2952 EXPECT_EQ(kDefaultCacheSize
- 1,
2953 disk_cache::PreferredCacheSize(large_size
* 10 / 8 - 1));
2955 // Region 2: expected = default_size
2956 EXPECT_EQ(kDefaultCacheSize
,
2957 disk_cache::PreferredCacheSize(large_size
* 10 / 8));
2958 EXPECT_EQ(kDefaultCacheSize
,
2959 disk_cache::PreferredCacheSize(large_size
* 10 - 1));
2961 // Region 3: expected = available * 0.1
2962 EXPECT_EQ(kDefaultCacheSize
,
2963 disk_cache::PreferredCacheSize(large_size
* 10));
2964 EXPECT_EQ((kDefaultCacheSize
* 25 - 1) / 10,
2965 disk_cache::PreferredCacheSize(large_size
* 25 - 1));
2967 // Region 4: expected = default_size * 2.5
2968 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2969 disk_cache::PreferredCacheSize(large_size
* 25));
2970 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2971 disk_cache::PreferredCacheSize(large_size
* 100 - 1));
2972 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2973 disk_cache::PreferredCacheSize(large_size
* 100));
2974 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2975 disk_cache::PreferredCacheSize(large_size
* 250 - 1));
2977 // Region 5: expected = available * 0.1
2978 int64 largest_size
= kDefaultCacheSize
* 4;
2979 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2980 disk_cache::PreferredCacheSize(large_size
* 250));
2981 EXPECT_EQ(largest_size
- 1,
2982 disk_cache::PreferredCacheSize(largest_size
* 100 - 1));
2984 // Region 6: expected = largest possible size
2985 EXPECT_EQ(largest_size
,
2986 disk_cache::PreferredCacheSize(largest_size
* 100));
2987 EXPECT_EQ(largest_size
,
2988 disk_cache::PreferredCacheSize(largest_size
* 10000));
2991 // Tests that we can "migrate" a running instance from one experiment group to
2993 TEST_F(DiskCacheBackendTest
, Histograms
) {
2995 disk_cache::BackendImpl
* backend_
= cache_impl_
; // Needed be the macro.
2997 for (int i
= 1; i
< 3; i
++) {
2998 CACHE_UMA(HOURS
, "FillupTime", i
, 28);
3002 // Make sure that we keep the total memory used by the internal buffers under
3004 TEST_F(DiskCacheBackendTest
, TotalBuffersSize1
) {
3006 std::string
key("the first key");
3007 disk_cache::Entry
* entry
;
3008 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3010 const int kSize
= 200;
3011 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
3012 CacheTestFillBuffer(buffer
->data(), kSize
, true);
3014 for (int i
= 0; i
< 10; i
++) {
3016 // Allocate 2MB for this entry.
3017 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, true));
3018 EXPECT_EQ(kSize
, WriteData(entry
, 1, 0, buffer
.get(), kSize
, true));
3020 WriteData(entry
, 0, 1024 * 1024, buffer
.get(), kSize
, false));
3022 WriteData(entry
, 1, 1024 * 1024, buffer
.get(), kSize
, false));
3024 // Delete one of the buffers and truncate the other.
3025 EXPECT_EQ(0, WriteData(entry
, 0, 0, buffer
.get(), 0, true));
3026 EXPECT_EQ(0, WriteData(entry
, 1, 10, buffer
.get(), 0, true));
3028 // Delete the second buffer, writing 10 bytes to disk.
3030 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3034 EXPECT_EQ(0, cache_impl_
->GetTotalBuffersSize());
3037 // This test assumes at least 150MB of system memory.
3038 TEST_F(DiskCacheBackendTest
, TotalBuffersSize2
) {
3041 const int kOneMB
= 1024 * 1024;
3042 EXPECT_TRUE(cache_impl_
->IsAllocAllowed(0, kOneMB
));
3043 EXPECT_EQ(kOneMB
, cache_impl_
->GetTotalBuffersSize());
3045 EXPECT_TRUE(cache_impl_
->IsAllocAllowed(0, kOneMB
));
3046 EXPECT_EQ(kOneMB
* 2, cache_impl_
->GetTotalBuffersSize());
3048 EXPECT_TRUE(cache_impl_
->IsAllocAllowed(0, kOneMB
));
3049 EXPECT_EQ(kOneMB
* 3, cache_impl_
->GetTotalBuffersSize());
3051 cache_impl_
->BufferDeleted(kOneMB
);
3052 EXPECT_EQ(kOneMB
* 2, cache_impl_
->GetTotalBuffersSize());
3054 // Check the upper limit.
3055 EXPECT_FALSE(cache_impl_
->IsAllocAllowed(0, 30 * kOneMB
));
3057 for (int i
= 0; i
< 30; i
++)
3058 cache_impl_
->IsAllocAllowed(0, kOneMB
); // Ignore the result.
3060 EXPECT_FALSE(cache_impl_
->IsAllocAllowed(0, kOneMB
));
3063 // Tests that sharing of external files works and we are able to delete the
3064 // files when we need to.
3065 TEST_F(DiskCacheBackendTest
, FileSharing
) {
3068 disk_cache::Addr
address(0x80000001);
3069 ASSERT_TRUE(cache_impl_
->CreateExternalFile(&address
));
3070 base::FilePath name
= cache_impl_
->GetFileName(address
);
3072 scoped_refptr
<disk_cache::File
> file(new disk_cache::File(false));
3076 DWORD sharing
= FILE_SHARE_READ
| FILE_SHARE_WRITE
;
3077 DWORD access
= GENERIC_READ
| GENERIC_WRITE
;
3078 base::win::ScopedHandle
file2(CreateFile(
3079 name
.value().c_str(), access
, sharing
, NULL
, OPEN_EXISTING
, 0, NULL
));
3080 EXPECT_FALSE(file2
.IsValid());
3082 sharing
|= FILE_SHARE_DELETE
;
3083 file2
.Set(CreateFile(name
.value().c_str(), access
, sharing
, NULL
,
3084 OPEN_EXISTING
, 0, NULL
));
3085 EXPECT_TRUE(file2
.IsValid());
3088 EXPECT_TRUE(base::DeleteFile(name
, false));
3090 // We should be able to use the file.
3091 const int kSize
= 200;
3092 char buffer1
[kSize
];
3093 char buffer2
[kSize
];
3094 memset(buffer1
, 't', kSize
);
3095 memset(buffer2
, 0, kSize
);
3096 EXPECT_TRUE(file
->Write(buffer1
, kSize
, 0));
3097 EXPECT_TRUE(file
->Read(buffer2
, kSize
, 0));
3098 EXPECT_EQ(0, memcmp(buffer1
, buffer2
, kSize
));
3100 EXPECT_TRUE(disk_cache::DeleteCacheFile(name
));
3103 TEST_F(DiskCacheBackendTest
, UpdateRankForExternalCacheHit
) {
3106 disk_cache::Entry
* entry
;
3108 for (int i
= 0; i
< 2; ++i
) {
3109 std::string key
= base::StringPrintf("key%d", i
);
3110 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3114 // Ping the oldest entry.
3115 cache_
->OnExternalCacheHit("key0");
3119 // Make sure the older key remains.
3120 EXPECT_EQ(1, cache_
->GetEntryCount());
3121 ASSERT_EQ(net::OK
, OpenEntry("key0", &entry
));
3125 TEST_F(DiskCacheBackendTest
, ShaderCacheUpdateRankForExternalCacheHit
) {
3126 SetCacheType(net::SHADER_CACHE
);
3129 disk_cache::Entry
* entry
;
3131 for (int i
= 0; i
< 2; ++i
) {
3132 std::string key
= base::StringPrintf("key%d", i
);
3133 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3137 // Ping the oldest entry.
3138 cache_
->OnExternalCacheHit("key0");
3142 // Make sure the older key remains.
3143 EXPECT_EQ(1, cache_
->GetEntryCount());
3144 ASSERT_EQ(net::OK
, OpenEntry("key0", &entry
));
3148 TEST_F(DiskCacheBackendTest
, SimpleCacheShutdownWithPendingCreate
) {
3149 SetCacheType(net::APP_CACHE
);
3150 SetSimpleCacheMode();
3151 BackendShutdownWithPendingCreate(false);
3154 TEST_F(DiskCacheBackendTest
, SimpleCacheShutdownWithPendingFileIO
) {
3155 SetCacheType(net::APP_CACHE
);
3156 SetSimpleCacheMode();
3157 BackendShutdownWithPendingFileIO(false);
3160 TEST_F(DiskCacheBackendTest
, SimpleCacheBasics
) {
3161 SetSimpleCacheMode();
3165 TEST_F(DiskCacheBackendTest
, SimpleCacheAppCacheBasics
) {
3166 SetCacheType(net::APP_CACHE
);
3167 SetSimpleCacheMode();
3171 TEST_F(DiskCacheBackendTest
, SimpleCacheKeying
) {
3172 SetSimpleCacheMode();
3176 TEST_F(DiskCacheBackendTest
, SimpleCacheAppCacheKeying
) {
3177 SetSimpleCacheMode();
3178 SetCacheType(net::APP_CACHE
);
3182 TEST_F(DiskCacheBackendTest
, DISABLED_SimpleCacheSetSize
) {
3183 SetSimpleCacheMode();
3187 // MacOS has a default open file limit of 256 files, which is incompatible with
3188 // this simple cache test.
3189 #if defined(OS_MACOSX)
3190 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3192 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3195 TEST_F(DiskCacheBackendTest
, SIMPLE_MAYBE_MACOS(SimpleCacheLoad
)) {
3196 SetMaxSize(0x100000);
3197 SetSimpleCacheMode();
3201 TEST_F(DiskCacheBackendTest
, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad
)) {
3202 SetCacheType(net::APP_CACHE
);
3203 SetSimpleCacheMode();
3204 SetMaxSize(0x100000);
3208 TEST_F(DiskCacheBackendTest
, SimpleDoomRecent
) {
3209 SetSimpleCacheMode();
3210 BackendDoomRecent();
3213 // crbug.com/330926, crbug.com/370677
3214 TEST_F(DiskCacheBackendTest
, DISABLED_SimpleDoomBetween
) {
3215 SetSimpleCacheMode();
3216 BackendDoomBetween();
3219 TEST_F(DiskCacheBackendTest
, SimpleCacheDoomAll
) {
3220 SetSimpleCacheMode();
3224 TEST_F(DiskCacheBackendTest
, SimpleCacheAppCacheOnlyDoomAll
) {
3225 SetCacheType(net::APP_CACHE
);
3226 SetSimpleCacheMode();
3230 TEST_F(DiskCacheBackendTest
, SimpleCacheOpenMissingFile
) {
3231 SetSimpleCacheMode();
3234 const char key
[] = "the first key";
3235 disk_cache::Entry
* entry
= NULL
;
3237 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3238 ASSERT_TRUE(entry
!= NULL
);
3242 // To make sure the file creation completed we need to call open again so that
3243 // we block until it actually created the files.
3244 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3245 ASSERT_TRUE(entry
!= NULL
);
3249 // Delete one of the files in the entry.
3250 base::FilePath to_delete_file
= cache_path_
.AppendASCII(
3251 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, 0));
3252 EXPECT_TRUE(base::PathExists(to_delete_file
));
3253 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file
));
3255 // Failing to open the entry should delete the rest of these files.
3256 ASSERT_EQ(net::ERR_FAILED
, OpenEntry(key
, &entry
));
3258 // Confirm the rest of the files are gone.
3259 for (int i
= 1; i
< disk_cache::kSimpleEntryFileCount
; ++i
) {
3260 base::FilePath
should_be_gone_file(cache_path_
.AppendASCII(
3261 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, i
)));
3262 EXPECT_FALSE(base::PathExists(should_be_gone_file
));
3266 TEST_F(DiskCacheBackendTest
, SimpleCacheOpenBadFile
) {
3267 SetSimpleCacheMode();
3270 const char key
[] = "the first key";
3271 disk_cache::Entry
* entry
= NULL
;
3273 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3274 disk_cache::Entry
* null
= NULL
;
3275 ASSERT_NE(null
, entry
);
3279 // To make sure the file creation completed we need to call open again so that
3280 // we block until it actually created the files.
3281 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3282 ASSERT_NE(null
, entry
);
3286 // The entry is being closed on the Simple Cache worker pool
3287 disk_cache::SimpleBackendImpl::FlushWorkerPoolForTesting();
3288 base::RunLoop().RunUntilIdle();
3290 // Write an invalid header for stream 0 and stream 1.
3291 base::FilePath entry_file1_path
= cache_path_
.AppendASCII(
3292 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, 0));
3294 disk_cache::SimpleFileHeader header
;
3295 header
.initial_magic_number
= GG_UINT64_C(0xbadf00d);
3297 implicit_cast
<int>(sizeof(header
)),
3298 base::WriteFile(entry_file1_path
, reinterpret_cast<char*>(&header
),
3300 ASSERT_EQ(net::ERR_FAILED
, OpenEntry(key
, &entry
));
3303 // Tests that the Simple Cache Backend fails to initialize with non-matching
3304 // file structure on disk.
3305 TEST_F(DiskCacheBackendTest
, SimpleCacheOverBlockfileCache
) {
3306 // Create a cache structure with the |BackendImpl|.
3308 disk_cache::Entry
* entry
;
3309 const int kSize
= 50;
3310 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
3311 CacheTestFillBuffer(buffer
->data(), kSize
, false);
3312 ASSERT_EQ(net::OK
, CreateEntry("key", &entry
));
3313 ASSERT_EQ(0, WriteData(entry
, 0, 0, buffer
.get(), 0, false));
3317 // Check that the |SimpleBackendImpl| does not favor this structure.
3318 base::Thread
cache_thread("CacheThread");
3319 ASSERT_TRUE(cache_thread
.StartWithOptions(
3320 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
3321 disk_cache::SimpleBackendImpl
* simple_cache
=
3322 new disk_cache::SimpleBackendImpl(
3323 cache_path_
, 0, net::DISK_CACHE
, cache_thread
.task_runner(), NULL
);
3324 net::TestCompletionCallback cb
;
3325 int rv
= simple_cache
->Init(cb
.callback());
3326 EXPECT_NE(net::OK
, cb
.GetResult(rv
));
3327 delete simple_cache
;
3328 DisableIntegrityCheck();
// Tests that the |BackendImpl| refuses to initialize on top of the files
// generated by the Simple Cache Backend.
TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
  // Create a cache structure with the |SimpleBackendImpl|.
  SetSimpleCacheMode();
  // NOTE(review): one source line is elided from this fragment here
  // (presumably InitCache()) — confirm against the full file.
  disk_cache::Entry* entry;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  // Create one entry and touch it with a zero-length write so the Simple
  // Cache files are materialized in |cache_path_|.
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));

  // NOTE(review): lines elided from this fragment here (presumably
  // entry->Close() and teardown of the simple backend) — confirm against the
  // full file.

  // Check that the |BackendImpl| does not favor this structure.
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
      cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL);
  // Unit-test mode keeps the backend from spawning extra cleanup behavior
  // outside the test's control.
  cache->SetUnitTestMode();
  net::TestCompletionCallback cb;
  int rv = cache->Init(cb.callback());
  // Initialization must be rejected: the directory holds Simple Cache files,
  // not blockfile-format files.
  EXPECT_NE(net::OK, cb.GetResult(rv));
  // NOTE(review): a line is elided from this fragment here (presumably
  // deleting |cache|) — confirm against the full file.
  // The on-disk state is deliberately inconsistent for this test, so skip the
  // fixture's teardown integrity check.
  DisableIntegrityCheck();
// Runs the shared BackendFixEnumerators() harness with the backend switched
// into Simple Cache mode.
TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
  SetSimpleCacheMode();
  BackendFixEnumerators();
// Tests basic functionality of the SimpleBackend implementation of the
// NOTE(review): the second comment line and some setup lines (InitCache(),
// the declaration of |count|) are elided from this fragment — confirm
// against the full file.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
  SetSimpleCacheMode();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries.
  // A full enumeration (count = -1) must visit every key exactly once.
  std::set<std::string> keys_to_match(key_pool);
  scoped_ptr<TestIterator> iter = CreateIterator();
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());

  // Check that opening entries does not affect enumeration.
  keys_to_match = key_pool;
  iter = CreateIterator();
  // Open one entry before starting the enumeration...
  disk_cache::Entry* entry_opened_before;
  ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
  // Enumerate roughly half the pool, then open another entry mid-stream.
  // NOTE(review): the remaining arguments of this EnumerateAndMatchKeys call
  // are elided from this fragment — confirm against the full file.
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
  disk_cache::Entry* entry_opened_middle;
  OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
  // Finish the enumeration with the two entries still open.
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  entry_opened_before->Close();
  entry_opened_middle->Close();
  // Every key must still have been enumerated exactly once.
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
// Tests that the enumerations are not affected by dooming an entry in the
// NOTE(review): the second comment line and some setup lines (InitCache(),
// the declaration of |count|) are elided from this fragment — confirm
// against the full file.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
  SetSimpleCacheMode();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries but the doomed one.
  std::set<std::string> keys_to_match(key_pool);
  scoped_ptr<TestIterator> iter = CreateIterator();
  // Enumerate roughly half the pool first...
  // NOTE(review): the remaining arguments of this EnumerateAndMatchKeys call
  // are elided from this fragment — confirm against the full file.
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
  // ...then doom one of the not-yet-enumerated keys mid-stream and remove it
  // from both the expectation set and the pool.
  std::string key_to_delete = *(keys_to_match.begin());
  DoomEntry(key_to_delete);
  keys_to_match.erase(key_to_delete);
  key_pool.erase(key_to_delete);
  // Finish the enumeration; the doomed entry must not reappear.
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
// Tests that enumerations are not affected by corrupt files.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
  SetSimpleCacheMode();
  // NOTE(review): a setup line (presumably InitCache()) is elided from this
  // fragment — confirm against the full file.
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Create a corrupt entry. The write/read sequence ensures that the entry will
  // have been created before corrupting the platform files, in the case of
  // optimistic operations.
  const std::string key = "the key";
  disk_cache::Entry* corrupted_entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
  ASSERT_TRUE(corrupted_entry);
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  // NOTE(review): the leading "ASSERT_EQ(kSize," of this write is elided from
  // this fragment — confirm against the full file.
  WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
  // Reading the data back forces the write to have completed before we
  // corrupt the file underneath.
  ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
  corrupted_entry->Close();

  // NOTE(review): the arguments of CreateCorruptFileForTests (presumably the
  // key and cache path) are elided from this fragment — confirm against the
  // full file.
  EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
  // The backend still counts the corrupt entry at this point (pool + 1).
  EXPECT_EQ(key_pool.size() + 1,
            implicit_cast<size_t>(cache_->GetEntryCount()));

  // Check that enumeration returns all entries but the corrupt one.
  // NOTE(review): the declaration of |count| is elided from this fragment —
  // confirm against the full file.
  std::set<std::string> keys_to_match(key_pool);
  scoped_ptr<TestIterator> iter = CreateIterator();
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
// Tests that enumerations don't leak memory when the backend is destructed
// NOTE(review): the second comment line and a setup line (presumably
// InitCache()) are elided from this fragment — confirm against the full file.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
  SetSimpleCacheMode();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Start an enumeration and keep it (and one open entry) alive while the
  // backend goes away.
  scoped_ptr<TestIterator> iter = CreateIterator();
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
  // RAII closer so the entry is released even though the test body makes no
  // further explicit calls on it.
  disk_cache::ScopedEntryPtr entry_closer(entry);

  // NOTE(review): a line is elided from this fragment here (presumably
  // cache_.reset(), destroying the backend mid-enumeration) — confirm against
  // the full file.
  // This test passes if we don't leak memory.
3490 // Tests that a SimpleCache doesn't crash when files are deleted very quickly
3492 // NOTE: IF THIS TEST IS FLAKY THEN IT IS FAILING. See https://crbug.com/416940
3493 TEST_F(DiskCacheBackendTest
, SimpleCacheDeleteQuickly
) {
3494 SetSimpleCacheMode();
3495 for (int i
= 0; i
< 100; ++i
) {
3498 EXPECT_TRUE(CleanupCacheDir());