// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/basictypes.h"
#include "base/file_util.h"
#include "base/metrics/field_trial.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
#include "net/base/cache_type.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_cache_test_base.h"
#include "net/disk_cache/disk_cache_test_util.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/experiments.h"
#include "net/disk_cache/histogram_macros.h"
#include "net/disk_cache/mapped_file.h"
#include "net/disk_cache/mem_backend_impl.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_test_util.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/disk_cache/tracing_cache_backend.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "base/win/scoped_handle.h"
const char kExistingEntryKey[] = "existing entry key";
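// Helper that builds a blockfile backend over |cache_path| (using
// |cache_thread| for IO) and populates it with a single entry keyed by
// kExistingEntryKey. Returns an empty scoped_ptr on failure.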
scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
    const base::Thread& cache_thread,
    base::FilePath& cache_path) {
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path, cache_thread.message_loop_proxy(), NULL));
  int rv = cache->Init(cb.callback());
  if (cb.GetResult(rv) != net::OK)
    return scoped_ptr<disk_cache::BackendImpl>();

  disk_cache::Entry* entry = NULL;
  rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
  if (cb.GetResult(rv) != net::OK)
    return scoped_ptr<disk_cache::BackendImpl>();
// Tests that can run with different types of caches.
class DiskCacheBackendTest : public DiskCacheTestWithCache {
  // Some utility methods:

  // Perform IO operations on the cache until there is pending IO.
  int GeneratePendingIO(net::TestCompletionCallback* cb);
  // Adds 5 sparse entries. |doomed_start| and |doomed_end|, if not NULL,
  // will be filled with times used by DoomEntriesSince and DoomEntriesBetween.
  // There are 4 entries after doomed_start and 2 after doomed_end.
  void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
  bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
  bool EnumerateAndMatchKeys(int max_to_open,
                             std::set<std::string>* keys_to_match,

  void BackendShutdownWithPendingFileIO(bool fast);
  void BackendShutdownWithPendingIO(bool fast);
  void BackendShutdownWithPendingCreate(bool fast);
  void BackendSetSize();
  void BackendValidEntry();
  void BackendInvalidEntry();
  void BackendInvalidEntryRead();
  void BackendInvalidEntryWithLoad();
  void BackendTrimInvalidEntry();
  void BackendTrimInvalidEntry2();
  void BackendEnumerations();
  void BackendEnumerations2();
  void BackendInvalidEntryEnumeration();
  void BackendFixEnumerators();
  void BackendDoomRecent();
  void BackendDoomBetween();
  void BackendTransaction(const std::string& name, int num_entries, bool load);
  void BackendRecoverInsert();
  void BackendRecoverRemove();
  void BackendRecoverWithEviction();
  void BackendInvalidEntry2();
  void BackendInvalidEntry3();
  void BackendInvalidEntry7();
  void BackendInvalidEntry8();
  void BackendInvalidEntry9(bool eviction);
  void BackendInvalidEntry10(bool eviction);
  void BackendInvalidEntry11(bool eviction);
  void BackendTrimInvalidEntry12();
  void BackendDoomAll();
  void BackendDoomAll2();
  void BackendInvalidRankings();
  void BackendInvalidRankings2();
  void BackendDisable();
  void BackendDisable2();
  void BackendDisable3();
  void BackendDisable4();
  void TracingBackendBasics();
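// Issues a series of writes against a single entry (directly on the EntryImpl
// for the blockfile backend, so the operation queue is bypassed) until one of
// them returns ERR_IO_PENDING, and leaves that operation pending for the
// shutdown tests to observe.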
int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
  if (!use_current_thread_) {
    return net::ERR_FAILED;
  }

  disk_cache::Entry* entry;
  int rv = cache_->CreateEntry("some key", &entry, cb->callback());
  if (cb->GetResult(rv) != net::OK)
    return net::ERR_CACHE_CREATE_FAILURE;

  const int kSize = 25000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
    // We are using the current thread as the cache thread because we want to
    // be able to call this method directly, to make sure that it is the OS
    // (and not a thread switch on our side) that returns IO pending.
    if (!simple_cache_mode_) {
      rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
          0, i, buffer.get(), kSize, cb->callback(), false);
    } else {
      rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
    }

    if (rv == net::ERR_IO_PENDING)
      break;
    if (rv != kSize)
      rv = net::ERR_FAILED;
  }

  // Don't call Close() to avoid going through the queue or we'll deadlock
  // waiting for the operation to finish.
  if (!simple_cache_mode_)
    static_cast<disk_cache::EntryImpl*>(entry)->Release();
void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
                                           base::Time* doomed_end) {
  const int kSize = 50;
  // This must be greater than MemEntryImpl::kMaxSparseEntrySize.
  const int kOffset = 10 + 1024 * 1024;

  disk_cache::Entry* entry0 = NULL;
  disk_cache::Entry* entry1 = NULL;
  disk_cache::Entry* entry2 = NULL;

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
  ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));

  *doomed_start = base::Time::Now();

  // Order in rankings list:
  // first_part1, first_part2, second_part1, second_part2
  ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
  ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));

  ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
  ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));

  *doomed_end = base::Time::Now();

  // Order in rankings list:
  // third_part1, fourth_part1, third_part2, fourth_part2
  disk_cache::Entry* entry3 = NULL;
  disk_cache::Entry* entry4 = NULL;
  ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
  ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
  ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
  ASSERT_EQ(kSize,
            WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
// Creates entries based on random keys. Stores these keys in |key_pool|.
bool DiskCacheBackendTest::CreateSetOfRandomEntries(
    std::set<std::string>* key_pool) {
  const int kNumEntries = 10;

  for (int i = 0; i < kNumEntries; ++i) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    if (CreateEntry(key, &entry) != net::OK)
      return false;
    key_pool->insert(key);
  }
  return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
// Performs iteration over the backend and checks that the keys of entries
// opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
// will be opened, if it is positive. Otherwise, iteration will continue until
// OpenNextEntry stops returning net::OK.
bool DiskCacheBackendTest::EnumerateAndMatchKeys(
    std::set<std::string>* keys_to_match,

  disk_cache::Entry* entry;

  while (OpenNextEntry(iter, &entry) == net::OK) {
    EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
    if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
      break;
void DiskCacheBackendTest::BackendBasics() {
  disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);

  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_TRUE(NULL != entry1);

  EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
  ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
  ASSERT_TRUE(NULL != entry1);
  ASSERT_TRUE(NULL != entry2);
  EXPECT_EQ(2, cache_->GetEntryCount());

  disk_cache::Entry* entry3 = NULL;
  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
  ASSERT_TRUE(NULL != entry3);
  EXPECT_TRUE(entry2 == entry3);
  EXPECT_EQ(2, cache_->GetEntryCount());

  EXPECT_EQ(net::OK, DoomEntry("some other key"));
  EXPECT_EQ(1, cache_->GetEntryCount());

  EXPECT_EQ(net::OK, DoomEntry("the first key"));
  EXPECT_EQ(0, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));

  EXPECT_EQ(net::OK, DoomEntry("some other key"));
  EXPECT_EQ(0, cache_->GetEntryCount());
TEST_F(DiskCacheBackendTest, Basics) {

TEST_F(DiskCacheBackendTest, NewEvictionBasics) {

TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {

TEST_F(DiskCacheBackendTest, AppCacheBasics) {
  SetCacheType(net::APP_CACHE);

TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
  SetCacheType(net::SHADER_CACHE);
void DiskCacheBackendTest::BackendKeying() {
  const char* kName1 = "the first key";
  const char* kName2 = "the first Key";
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));

  ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
  EXPECT_TRUE(entry1 != entry2) << "Case sensitive";

  base::strlcpy(buffer, kName1, arraysize(buffer));
  ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
  EXPECT_TRUE(entry1 == entry2);

  base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
  ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
  EXPECT_TRUE(entry1 == entry2);

  base::strlcpy(buffer + 3, kName1, arraysize(buffer) - 3);
  ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
  EXPECT_TRUE(entry1 == entry2);

  // Now verify long keys.
  memset(buffer2, 's', sizeof(buffer2));
  buffer2[1023] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";

  buffer2[19999] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
TEST_F(DiskCacheBackendTest, Keying) {

TEST_F(DiskCacheBackendTest, NewEvictionKeying) {

TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {

TEST_F(DiskCacheBackendTest, AppCacheKeying) {
  SetCacheType(net::APP_CACHE);

TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
  SetCacheType(net::SHADER_CACHE);
TEST_F(DiskCacheTest, CreateBackend) {
  net::TestCompletionCallback cb;

  ASSERT_TRUE(CleanupCacheDir());
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  // Test the private factory method(s).
  scoped_ptr<disk_cache::Backend> cache;
  cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
  ASSERT_TRUE(cache.get());

  // Now test the public API.
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_DEFAULT,
                                     cache_thread.message_loop_proxy().get(),
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  ASSERT_TRUE(cache.get());

  rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
                                      net::CACHE_BACKEND_DEFAULT,
                                      false, NULL, NULL, &cache,
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  ASSERT_TRUE(cache.get());

  base::MessageLoop::current()->RunUntilIdle();
// Tests that |BackendImpl| fails to initialize with a missing file.
TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  base::FilePath filename = cache_path_.AppendASCII("data_1");
  base::DeleteFile(filename, false);
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path_, cache_thread.message_loop_proxy().get(), NULL));
  int rv = cache->Init(cb.callback());
  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);

  DisableIntegrityCheck();
TEST_F(DiskCacheBackendTest, ExternalFiles) {
  // First, let's create a file in the cache folder.
  base::FilePath filename = cache_path_.AppendASCII("f_000001");

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  ASSERT_EQ(kSize, file_util::WriteFile(filename, buffer1->data(), kSize));

  // Now let's create a file with the cache.
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));

  // And verify that the first file is still there.
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
// Tests that we deal with file-level pending operations at destruction time.
void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
  ASSERT_TRUE(CleanupCacheDir());
  uint32 flags = disk_cache::kNoBuffering;

  flags |= disk_cache::kNoRandom;

  CreateBackend(flags, NULL);

  net::TestCompletionCallback cb;
  int rv = GeneratePendingIO(&cb);

  // The cache destructor will see one pending operation here.

  if (rv == net::ERR_IO_PENDING) {
    if (fast || simple_cache_mode_)
      EXPECT_FALSE(cb.have_result());
    else
      EXPECT_TRUE(cb.have_result());
  }

  base::MessageLoop::current()->RunUntilIdle();

  // Wait for the actual operation to complete, or we'll keep a file handle that
  // may cause issues later. Note that on iOS systems even though this test
  // uses a single thread, the actual IO is posted to a worker thread and the
  // cache destructor breaks the link to reach cb when the operation completes.
  rv = cb.GetResult(rv);
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
  BackendShutdownWithPendingFileIO(false);

// Here and below, tests that simulate crashes are not compiled in LeakSanitizer
// builds because they contain a lot of intentional memory leaks.
// The wrapper scripts used to run tests under Valgrind Memcheck and
// Heapchecker will also disable these tests under those tools. See:
// tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
// tools/heapcheck/net_unittests.gtest-heapcheck.txt
#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  BackendShutdownWithPendingFileIO(true);

// See crbug.com/330074
// Tests that one cache instance is not affected by another one going away.
TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
  base::ScopedTempDir store;
  ASSERT_TRUE(store.CreateUniqueTempDir());

  net::TestCompletionCallback cb;
  scoped_ptr<disk_cache::Backend> extra_cache;
  int rv = disk_cache::CreateCacheBackend(
      net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, store.path(), 0,
      false, base::MessageLoopProxy::current().get(), NULL,
      &extra_cache, cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  ASSERT_TRUE(extra_cache.get() != NULL);

  ASSERT_TRUE(CleanupCacheDir());
  SetNewEviction();  // Match the expected behavior for integrity verification.

  CreateBackend(disk_cache::kNoBuffering, NULL);
  rv = GeneratePendingIO(&cb);

  // cache_ has a pending operation, and extra_cache will go away.

  if (rv == net::ERR_IO_PENDING)
    EXPECT_FALSE(cb.have_result());

  base::MessageLoop::current()->RunUntilIdle();

  // Wait for the actual operation to complete, or we'll keep a file handle that
  // may cause issues later.
  rv = cb.GetResult(rv);
// Tests that we deal with background-thread pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
  net::TestCompletionCallback cb;

  ASSERT_TRUE(CleanupCacheDir());
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  uint32 flags = disk_cache::kNoBuffering;

  flags |= disk_cache::kNoRandom;

  CreateBackend(flags, &cache_thread);

  disk_cache::Entry* entry;
  int rv = cache_->CreateEntry("some key", &entry, cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));

  // The cache destructor will see one pending operation here.

  base::MessageLoop::current()->RunUntilIdle();
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
  BackendShutdownWithPendingIO(false);

#if !defined(LEAK_SANITIZER)
// We'll be leaking from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  BackendShutdownWithPendingIO(true);
// Tests that we deal with create-type pending operations.
void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
  net::TestCompletionCallback cb;

  ASSERT_TRUE(CleanupCacheDir());
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  disk_cache::BackendFlags flags =
      fast ? disk_cache::kNone : disk_cache::kNoRandom;
  CreateBackend(flags, &cache_thread);

  disk_cache::Entry* entry;
  int rv = cache_->CreateEntry("some key", &entry, cb.callback());
  ASSERT_EQ(net::ERR_IO_PENDING, rv);

  EXPECT_FALSE(cb.have_result());

  base::MessageLoop::current()->RunUntilIdle();
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
  BackendShutdownWithPendingCreate(false);

#if !defined(LEAK_SANITIZER)
// We'll be leaking an entry from this test.
TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
  // The integrity test sets kNoRandom so there's a version mismatch if we don't
  // force new eviction.
  BackendShutdownWithPendingCreate(true);
TEST_F(DiskCacheTest, TruncatedIndex) {
  ASSERT_TRUE(CleanupCacheDir());
  base::FilePath index = cache_path_.AppendASCII("index");
  ASSERT_EQ(5, file_util::WriteFile(index, "hello", 5));

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::Backend> backend;
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     cache_thread.message_loop_proxy().get(),
  ASSERT_NE(net::OK, cb.GetResult(rv));

  ASSERT_FALSE(backend);
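// Checks that SetMaxSize() is enforced: writes above the per-file limit fail,
// and filling the cache triggers eviction of the oldest entry.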
void DiskCacheBackendTest::BackendSetSize() {
  const int cache_size = 0x10000;  // 64 kB
  SetMaxSize(cache_size);

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
  memset(buffer->data(), 0, cache_size);
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));

  EXPECT_EQ(net::ERR_FAILED,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
      << "file size above the limit";

  // By doubling the total size, we make this file cacheable.
  SetMaxSize(cache_size * 2);
  EXPECT_EQ(cache_size / 5,
            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));

  // Let's fill up the cache.
  SetMaxSize(cache_size * 10);
  EXPECT_EQ(cache_size * 3 / 4,
            WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));

  SetMaxSize(cache_size);

  // The cache is 95% full.
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(cache_size / 10,
            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));

  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
  EXPECT_EQ(cache_size / 10,
            WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
  entry2->Close();  // This will trigger the cache trim.

  EXPECT_NE(net::OK, OpenEntry(first, &entry2));

  FlushQueueForTest();  // Make sure that we are done trimming the cache.
  FlushQueueForTest();  // We may have posted two tasks to evict stuff.

  ASSERT_EQ(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
TEST_F(DiskCacheBackendTest, SetSize) {

TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {

TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
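// Stress test: creates 100 entries, reopens them in random order, and expects
// the cache to end up empty once they have all been removed.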
void DiskCacheBackendTest::BackendLoad() {
  int seed = static_cast<int>(Time::Now().ToInternalValue());

  disk_cache::Entry* entries[100];
  for (int i = 0; i < 100; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(100, cache_->GetEntryCount());

  for (int i = 0; i < 100; i++) {
    int source1 = rand() % 100;
    int source2 = rand() % 100;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  for (int i = 0; i < 100; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
    EXPECT_TRUE(entry == entries[i]);
  }

  EXPECT_EQ(0, cache_->GetEntryCount());
TEST_F(DiskCacheBackendTest, Load) {
  // Work with a tiny index table (16 entries)
  SetMaxSize(0x100000);

TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
  // Work with a tiny index table (16 entries)
  SetMaxSize(0x100000);

TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
  SetMaxSize(0x100000);

TEST_F(DiskCacheBackendTest, AppCacheLoad) {
  SetCacheType(net::APP_CACHE);
  // Work with a tiny index table (16 entries)
  SetMaxSize(0x100000);

TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
  SetCacheType(net::SHADER_CACHE);
  // Work with a tiny index table (16 entries)
  SetMaxSize(0x100000);
// Tests the chaining of an entry to the current head.
void DiskCacheBackendTest::BackendChain() {
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
TEST_F(DiskCacheBackendTest, Chain) {

TEST_F(DiskCacheBackendTest, NewEvictionChain) {

TEST_F(DiskCacheBackendTest, AppCacheChain) {
  SetCacheType(net::APP_CACHE);

TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
  SetCacheType(net::SHADER_CACHE);
TEST_F(DiskCacheBackendTest, NewEvictionTrim) {

  disk_cache::Entry* entry;
  for (int i = 0; i < 100; i++) {
    std::string name(base::StringPrintf("Key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(name, &entry));

    // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
    ASSERT_EQ(net::OK, OpenEntry(name, &entry));
  }

  // The first eviction must come from list 1 (10% limit), the second must come
  // from list 0.
  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));

  // Double check that we still have the list tails.
  ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
  ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
// Before looking for invalid entries, let's check a valid entry.
void DiskCacheBackendTest::BackendValidEntry() {
  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  memset(buffer2->data(), 0, kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));

  EXPECT_STREQ(buffer1->data(), buffer2->data());
TEST_F(DiskCacheBackendTest, ValidEntry) {

TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
// The same logic as the previous test (ValidEntry), but this time we force the
// entry to be invalid, simulating a crash in the middle.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntry() {
  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_NE(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(0, cache_->GetEntryCount());
#if !defined(LEAK_SANITIZER)
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntry) {
  BackendInvalidEntry();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
  BackendInvalidEntry();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntry();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntry();
// Almost the same test, but this time crash the cache after reading an entry.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryRead() {
  std::string key("Some key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  base::strlcpy(buffer->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));

  if (type_ == net::APP_CACHE) {
    // Reading an entry and crashing should not make it dirty.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    EXPECT_NE(net::OK, OpenEntry(key, &entry));
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
  BackendInvalidEntryRead();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
  BackendInvalidEntryRead();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryRead();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryRead();

// We'll be leaking memory from this test.
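// Creates many entries and closes only half of them before the cache goes
// away: the entries left open are expected to be unrecoverable afterwards,
// while the closed half must still be present.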
void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
  // Work with a tiny index table (16 entries)
  SetMaxSize(0x100000);

  int seed = static_cast<int>(Time::Now().ToInternalValue());

  const int kNumEntries = 100;
  disk_cache::Entry* entries[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  for (int i = 0; i < kNumEntries; i++) {
    int source1 = rand() % kNumEntries;
    int source2 = rand() % kNumEntries;
    disk_cache::Entry* temp = entries[source1];
    entries[source1] = entries[source2];
    entries[source2] = temp;
  }

  std::string keys[kNumEntries];
  for (int i = 0; i < kNumEntries; i++) {
    keys[i] = entries[i]->GetKey();
    if (i < kNumEntries / 2)
      entries[i]->Close();
  }

  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
    disk_cache::Entry* entry;
    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
  }

  for (int i = 0; i < kNumEntries / 2; i++) {
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
  }

  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
  BackendInvalidEntryWithLoad();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
  SetCacheType(net::APP_CACHE);
  BackendInvalidEntryWithLoad();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
  SetCacheType(net::SHADER_CACHE);
  BackendInvalidEntryWithLoad();
// We'll be leaking memory from this test.
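// Writes an entry, simulates a crash so the entry is left marked as dirty, and
// verifies that the subsequent cache trim evicts it.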
void DiskCacheBackendTest::BackendTrimInvalidEntry() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  // Simulate a crash.

  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  EXPECT_EQ(2, cache_->GetEntryCount());

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // If we evicted the entry in less than 20mS, we have one entry in the cache;
  // if it took more than that, we posted a task and we'll delete the second
  // entry as well.
  base::MessageLoop::current()->RunUntilIdle();

  // This may not be thread-safe in general, but for now it's OK so add some
  // ThreadSanitizer annotations to ignore data races on cache_.
  // See http://crbug.com/55970
  ANNOTATE_IGNORE_READS_BEGIN();
  EXPECT_GE(1, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_END();

  EXPECT_NE(net::OK, OpenEntry(first, &entry));
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
  BackendTrimInvalidEntry();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
  BackendTrimInvalidEntry();
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
  SetMask(0xf);  // 16-entry table.

  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 40);

  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  memset(buffer->data(), 0, kSize);
  disk_cache::Entry* entry;

  // Writing 32 entries to this cache chains most of them.
  for (int i = 0; i < 32; i++) {
    std::string key(base::StringPrintf("some key %d", i));
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    // Note that we are not closing the entries.
  }

  // Simulate a crash.

  ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));

  FlushQueueForTest();
  EXPECT_EQ(33, cache_->GetEntryCount());

  // For the new eviction code, all corrupt entries are on the second list so
  // they are not going away that easily.
  if (new_eviction_) {
    EXPECT_EQ(net::OK, DoomAllEntries());
  }

  entry->Close();  // Trim the cache.
  FlushQueueForTest();

  // We may abort the eviction before cleaning up everything.
  base::MessageLoop::current()->RunUntilIdle();
  FlushQueueForTest();
  // If it's not clear enough: we may still have eviction tasks running at this
  // time, so the number of entries is changing while we read it.
  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  EXPECT_GE(30, cache_->GetEntryCount());
  ANNOTATE_IGNORE_READS_AND_WRITES_END();
// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
  BackendTrimInvalidEntry2();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
  BackendTrimInvalidEntry2();

#endif  // !defined(LEAK_SANITIZER)
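// Creates a batch of entries and walks the cache twice with OpenNextEntry(),
// checking that every entry is visited and that enumeration does not alter the
// last-used and last-modified times.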
void DiskCacheBackendTest::BackendEnumerations() {
  Time initial = Time::Now();

  const int kNumEntries = 100;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
  Time final = Time::Now();

  disk_cache::Entry* entry;

  Time last_modified[kNumEntries];
  Time last_used[kNumEntries];
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      last_modified[count] = entry->GetLastModified();
      last_used[count] = entry->GetLastUsed();
      EXPECT_TRUE(initial <= last_modified[count]);
      EXPECT_TRUE(final >= last_modified[count]);
    }
  }
  EXPECT_EQ(kNumEntries, count);

  // The previous enumeration should not have changed the timestamps.
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    if (count < kNumEntries) {
      EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
      EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
    }
  }
  EXPECT_EQ(kNumEntries, count);
TEST_F(DiskCacheBackendTest, Enumerations) {
  BackendEnumerations();

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
  BackendEnumerations();

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
  SetMemoryOnlyMode();
  BackendEnumerations();

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations();

TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations();
// Verifies enumerations while entries are open.
void DiskCacheBackendTest::BackendEnumerations2() {
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
  FlushQueueForTest();

  // Make sure that the timestamp is not the same.
  ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), second);

  // Two entries and the iterator pointing at "first".

  // The iterator should still be valid, so we should not crash.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), first);
  cache_->EndEnumeration(&iter);

  // Modify the oldest entry and get the newest element.
  ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
  EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  if (type_ == net::APP_CACHE) {
    // The list is not updated.
    EXPECT_EQ(entry2->GetKey(), second);
  } else {
    EXPECT_EQ(entry2->GetKey(), first);
  }

  cache_->EndEnumeration(&iter);
TEST_F(DiskCacheBackendTest, Enumerations2) {
  BackendEnumerations2();

TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
  BackendEnumerations2();

TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
  SetMemoryOnlyMode();
  BackendEnumerations2();

TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
  SetCacheType(net::APP_CACHE);
  BackendEnumerations2();

TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
  SetCacheType(net::SHADER_CACHE);
  BackendEnumerations2();
// Verify that ReadData calls do not update the LRU cache
// when using the SHADER_CACHE type.
TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
  SetCacheType(net::SHADER_CACHE);
  const std::string first("first");
  const std::string second("second");
  disk_cache::Entry *entry1, *entry2;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));

  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));

  FlushQueueForTest();

  // Make sure that the timestamp is not the same.

  // Read from the last item in the LRU.
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));

  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(entry2->GetKey(), second);

  cache_->EndEnumeration(&iter);
#if !defined(LEAK_SANITIZER)
// Verify handling of invalid entries while doing enumerations.
// We'll be leaking memory from this test.
void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
  std::string key("Some key");
  disk_cache::Entry *entry, *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  memset(buffer1->data(), 0, kSize);
  base::strlcpy(buffer1->data(), "And the data to save", kSize);
  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));

  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));

  std::string key2("Another key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));

  ASSERT_EQ(2, cache_->GetEntryCount());

  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    EXPECT_EQ(key2, entry->GetKey());
  }
  EXPECT_EQ(1, count);
  EXPECT_EQ(1, cache_->GetEntryCount());

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();

// We'll be leaking memory from this test.
TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
  BackendInvalidEntryEnumeration();

#endif  // !defined(LEAK_SANITIZER)
// Tests that if for some reason entries are modified close to existing cache
// iterators, we don't generate fatal errors or reset the cache.
void DiskCacheBackendTest::BackendFixEnumerators() {
  int seed = static_cast<int>(Time::Now().ToInternalValue());

  const int kNumEntries = 10;
  for (int i = 0; i < kNumEntries; i++) {
    std::string key = GenerateKey(true);
    disk_cache::Entry* entry;
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  }
  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());

  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
  ASSERT_TRUE(NULL != entry1);

  // Let's go to the middle of the list.
  for (int i = 0; i < kNumEntries / 2; i++) {
    ASSERT_EQ(net::OK, OpenNextEntry(&iter1, &entry1));
    ASSERT_TRUE(NULL != entry1);

    ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
    ASSERT_TRUE(NULL != entry2);
  }

  // Messing with entry1 will modify entry2->next.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);

  // The link entry2->entry1 should be broken.
  EXPECT_NE(entry2->GetKey(), entry1->GetKey());

  // And the second iterator should keep working.
  ASSERT_EQ(net::OK, OpenNextEntry(&iter2, &entry2));
  ASSERT_TRUE(NULL != entry2);

  cache_->EndEnumeration(&iter1);
  cache_->EndEnumeration(&iter2);
TEST_F(DiskCacheBackendTest, FixEnumerators) {
  BackendFixEnumerators();

TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
  BackendFixEnumerators();
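// Creates two entries, records a timestamp, creates two more, and verifies
// that DoomEntriesSince() removes only the entries created after that
// timestamp.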
void DiskCacheBackendTest::BackendDoomRecent() {
  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  FlushQueueForTest();

  Time middle = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  FlushQueueForTest();

  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomEntriesSince(final));
  ASSERT_EQ(4, cache_->GetEntryCount());

  EXPECT_EQ(net::OK, DoomEntriesSince(middle));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("second", &entry));
TEST_F(DiskCacheBackendTest, DoomRecent) {
  BackendDoomRecent();

TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
  BackendDoomRecent();

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
  SetMemoryOnlyMode();
  BackendDoomRecent();

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  EXPECT_EQ(1, cache_->GetEntryCount());

TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
  InitSparseCache(&start, NULL);
  DoomEntriesSince(start);
  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
  // MemBackendImpl does not. That's why the expected value differs here from
  // MemoryOnlyDoomEntriesSinceSparse.
  EXPECT_EQ(3, cache_->GetEntryCount());

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
  SetMemoryOnlyMode();
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());

TEST_F(DiskCacheBackendTest, DoomAllSparse) {
  InitSparseCache(NULL, NULL);
  EXPECT_EQ(net::OK, DoomAllEntries());
  EXPECT_EQ(0, cache_->GetEntryCount());
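// Creates four entries across three timestamps and verifies that
// DoomEntriesBetween() removes exactly the entries last used inside the given
// interval.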
void DiskCacheBackendTest::BackendDoomBetween() {
  disk_cache::Entry *entry;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
  FlushQueueForTest();

  Time middle_start = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  FlushQueueForTest();

  Time middle_end = Time::Now();

  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
  FlushQueueForTest();

  Time final = Time::Now();

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
  ASSERT_EQ(2, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));

  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
  ASSERT_EQ(1, cache_->GetEntryCount());

  ASSERT_EQ(net::OK, OpenEntry("first", &entry));
TEST_F(DiskCacheBackendTest, DoomBetween) {
  BackendDoomBetween();

TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
  BackendDoomBetween();

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
  SetMemoryOnlyMode();
  BackendDoomBetween();

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
  SetMemoryOnlyMode();
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());

  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(1, cache_->GetEntryCount());

TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
  base::Time start, end;
  InitSparseCache(&start, &end);
  DoomEntriesBetween(start, end);
  EXPECT_EQ(9, cache_->GetEntryCount());

  end = base::Time::Now();
  DoomEntriesBetween(start, end);
  EXPECT_EQ(3, cache_->GetEntryCount());
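// Restores a canned cache (CopyTestCache(|name|)) that was captured in the
// middle of an operation and checks that, after reinitialization, the
// interrupted entry is gone, |num_entries| remain (one less under heavy load),
// and the cache passes the integrity check.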
void DiskCacheBackendTest::BackendTransaction(const std::string& name,
                                              int num_entries, bool load) {
  ASSERT_TRUE(CopyTestCache(name));
  DisableFirstCleanup();

  SetMaxSize(0x100000);

  // Clear the settings from the previous run.

  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());

  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_NE(net::OK, OpenEntry(key, &entry1));

  int actual = cache_->GetEntryCount();
  if (num_entries != actual) {
    // If there is a heavy load, inserting an entry will make another entry
    // dirty (on the hash bucket) so two entries are removed.
    ASSERT_EQ(num_entries - 1, actual);
  }

  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
void DiskCacheBackendTest::BackendRecoverInsert() {
  // Tests with an empty cache.
  BackendTransaction("insert_empty1", 0, false);
  ASSERT_TRUE(success_) << "insert_empty1";
  BackendTransaction("insert_empty2", 0, false);
  ASSERT_TRUE(success_) << "insert_empty2";
  BackendTransaction("insert_empty3", 0, false);
  ASSERT_TRUE(success_) << "insert_empty3";

  // Tests with one entry on the cache.
  BackendTransaction("insert_one1", 1, false);
  ASSERT_TRUE(success_) << "insert_one1";
  BackendTransaction("insert_one2", 1, false);
  ASSERT_TRUE(success_) << "insert_one2";
  BackendTransaction("insert_one3", 1, false);
  ASSERT_TRUE(success_) << "insert_one3";

  // Tests with one hundred entries on the cache, tiny index.
  BackendTransaction("insert_load1", 100, true);
  ASSERT_TRUE(success_) << "insert_load1";
  BackendTransaction("insert_load2", 100, true);
  ASSERT_TRUE(success_) << "insert_load2";

TEST_F(DiskCacheBackendTest, RecoverInsert) {
  BackendRecoverInsert();

TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
  BackendRecoverInsert();
void DiskCacheBackendTest::BackendRecoverRemove() {
  // Removing the only element.
  BackendTransaction("remove_one1", 0, false);
  ASSERT_TRUE(success_) << "remove_one1";
  BackendTransaction("remove_one2", 0, false);
  ASSERT_TRUE(success_) << "remove_one2";
  BackendTransaction("remove_one3", 0, false);
  ASSERT_TRUE(success_) << "remove_one3";

  // Removing the head.
  BackendTransaction("remove_head1", 1, false);
  ASSERT_TRUE(success_) << "remove_head1";
  BackendTransaction("remove_head2", 1, false);
  ASSERT_TRUE(success_) << "remove_head2";
  BackendTransaction("remove_head3", 1, false);
  ASSERT_TRUE(success_) << "remove_head3";

  // Removing the tail.
  BackendTransaction("remove_tail1", 1, false);
  ASSERT_TRUE(success_) << "remove_tail1";
  BackendTransaction("remove_tail2", 1, false);
  ASSERT_TRUE(success_) << "remove_tail2";
  BackendTransaction("remove_tail3", 1, false);
  ASSERT_TRUE(success_) << "remove_tail3";

  // Removing with one hundred entries on the cache, tiny index.
  BackendTransaction("remove_load1", 100, true);
  ASSERT_TRUE(success_) << "remove_load1";
  BackendTransaction("remove_load2", 100, true);
  ASSERT_TRUE(success_) << "remove_load2";
  BackendTransaction("remove_load3", 100, true);
  ASSERT_TRUE(success_) << "remove_load3";

  // This case cannot be reverted.
  BackendTransaction("remove_one4", 0, false);
  ASSERT_TRUE(success_) << "remove_one4";
  BackendTransaction("remove_head4", 1, false);
  ASSERT_TRUE(success_) << "remove_head4";

TEST_F(DiskCacheBackendTest, RecoverRemove) {
  BackendRecoverRemove();

TEST_F(DiskCacheBackendTest, NewEvictionRecoverRemove) {
  BackendRecoverRemove();
void DiskCacheBackendTest::BackendRecoverWithEviction() {
  ASSERT_TRUE(CopyTestCache("insert_load1"));
  DisableFirstCleanup();

  // We should not crash here.

  DisableIntegrityCheck();

TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
  BackendRecoverWithEviction();

TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
  BackendRecoverWithEviction();
// Tests that the |BackendImpl| fails to start with the wrong cache version.
TEST_F(DiskCacheTest, WrongVersion) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
      cache_path_, cache_thread.message_loop_proxy().get(), NULL));
  int rv = cache->Init(cb.callback());
  ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
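// Entropy provider used by the SimpleCacheTrial tests below to
// deterministically force this run into a chosen experiment group.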
class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
  virtual ~BadEntropyProvider() {}

  virtual double GetEntropyForTrial(const std::string& trial_name,
                                    uint32 randomization_seed) const OVERRIDE {
// Tests that the disk cache successfully joins the control group, dropping the
// existing cache in favour of a new empty cache.
TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");
  net::TestCompletionCallback cb;
  scoped_ptr<disk_cache::Backend> base_cache;
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     cache_thread.message_loop_proxy().get(),
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  EXPECT_EQ(0, base_cache->GetEntryCount());
// Tests that the disk cache can restart in the control group preserving
// existing entries.
TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentControl group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                         "ExperimentControl");

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  scoped_ptr<disk_cache::BackendImpl> cache =
      CreateExistingEntryCache(cache_thread, cache_path_);
  ASSERT_TRUE(cache.get());

  net::TestCompletionCallback cb;

  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    cache.reset(new disk_cache::BackendImpl(
        cache_path_, cache_thread.message_loop_proxy(), NULL));
    int rv = cache->Init(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_EQ(1, cache->GetEntryCount());

    disk_cache::Entry* entry = NULL;
    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
// Tests that the disk cache can leave the control group preserving existing
// entries.
TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  {
    // Instantiate the SimpleCacheTrial, forcing this run into the
    // ExperimentControl group.
    base::FieldTrialList field_trial_list(new BadEntropyProvider());
    base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
                                           "ExperimentControl");

    scoped_ptr<disk_cache::BackendImpl> cache =
        CreateExistingEntryCache(cache_thread, cache_path_);
    ASSERT_TRUE(cache.get());
  }

  // Instantiate the SimpleCacheTrial, forcing this run into the
  // ExperimentNo group.
  base::FieldTrialList field_trial_list(new BadEntropyProvider());
  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
  net::TestCompletionCallback cb;

  const int kRestartCount = 5;
  for (int i = 0; i < kRestartCount; ++i) {
    scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
        cache_path_, cache_thread.message_loop_proxy(), NULL));
    int rv = cache->Init(cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    EXPECT_EQ(1, cache->GetEntryCount());

    disk_cache::Entry* entry = NULL;
    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
    EXPECT_EQ(net::OK, cb.GetResult(rv));
// Tests that the cache is properly restarted on recovery error.
TEST_F(DiskCacheBackendTest, DeleteOld) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));

  net::TestCompletionCallback cb;
  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
  base::FilePath path(cache_path_);
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_BLOCKFILE,
                                     cache_thread.message_loop_proxy().get(),
  path.clear();  // Make sure path was captured by the previous call.
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  base::ThreadRestrictions::SetIOAllowed(prev);

  EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
// We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidEntry2() {
  ASSERT_TRUE(CopyTestCache("bad_entry"));
  DisableFirstCleanup();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();

TEST_F(DiskCacheBackendTest, InvalidEntry2) {
  BackendInvalidEntry2();

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
  BackendInvalidEntry2();
// Tests that we don't crash or hang when enumerating this cache.
void DiskCacheBackendTest::BackendInvalidEntry3() {
  SetMask(0x1);        // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();

  disk_cache::Entry* entry;
  while (OpenNextEntry(&iter, &entry) == net::OK) {

TEST_F(DiskCacheBackendTest, InvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  BackendInvalidEntry3();

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  BackendInvalidEntry3();
  DisableIntegrityCheck();

// Test that we handle a dirty entry on the LRU list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry4) {
  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  TrimForTest(false);
}

// Test that we handle a dirty entry on the deleted list, already replaced with
// the same key, and with hash collisions.
TEST_F(DiskCacheBackendTest, InvalidEntry5) {
  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
  SetNewEviction();
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  TrimDeletedListForTest(false);
}

TEST_F(DiskCacheBackendTest, InvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a dirty entry (but marked as clean) at the end, pointing to a
  // deleted entry through the hash collision list. We should not re-insert the
  // deleted entry into the index table.

  TrimForTest(false);
  // The cache should be clean (as detected by CheckCacheIntegrity).
}

// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry1) {
  ASSERT_TRUE(CopyTestCache("list_loop2"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // The second entry points at itself, and the first entry is not accessible
  // through the index, but it is at the head of the LRU.

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
  entry->Close();
  EXPECT_EQ(1, cache_->GetEntryCount());
}

// Tests that we don't hang when there is a loop on the hash collision list.
// The test cache could be a result of bug 69135.
TEST_F(DiskCacheBackendTest, BadNextEntry2) {
  ASSERT_TRUE(CopyTestCache("list_loop3"));
  SetMask(0x1);  // 2-entry table.
  SetMaxSize(0x3000);  // 12 kB.
  DisableFirstCleanup();
  InitCache();

  // There is a wide loop of 5 entries.

  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();

  // The second entry is dirty, but removing it should not corrupt the list.
  disk_cache::Entry* entry;
  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));

  // This should not delete the cache.
  entry->Doom();
  FlushQueueForTest();
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
  entry->Close();
}

// Tests handling of corrupt entries by keeping the rankings node around, with
// a fatal failure.
void DiskCacheBackendTest::BackendInvalidEntry7() {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->next = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should delete the cache. The list still has a corrupt node.
  void* iter = NULL;
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, InvalidEntry7) {
  BackendInvalidEntry7();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
  SetNewEviction();
  BackendInvalidEntry7();
}
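
// A note on BackendInvalidEntry7: zeroing rankings()->Data()->next corrupts
// the entry's node in the rankings (LRU) list. Opening the entry merely drops
// that entry, but the corrupt node is still linked into the list, so the next
// enumeration fails and the whole cache is discarded (entry count goes to 0).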

// Tests handling of corrupt entries by keeping the rankings node around, with
// a non-fatal failure.
void DiskCacheBackendTest::BackendInvalidEntry8() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->rankings()->Data()->contents = 0;
  entry_impl->rankings()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  // This should detect the bad entry.
  EXPECT_NE(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());

  // We should not delete the cache.
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  entry->Close();
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, InvalidEntry8) {
  BackendInvalidEntry8();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
  SetNewEviction();
  BackendInvalidEntry8();
}
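
// A note on BackendInvalidEntry8: here only the |contents| field of the
// rankings node is clobbered, which is a recoverable failure. The bad entry is
// still dropped on open, but the list stays usable: enumeration returns the
// surviving entry and the cache keeps it.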

// Tests handling of corrupt entries detected by enumerations. Note that these
// tests (xx9 to xx11) are basically just going through slightly different
// codepaths so they are tightly coupled with the code, but that is better than
// not testing error handling code.
void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(2, cache_->GetEntryCount());

  if (eviction) {
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    void* iter = NULL;
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

    // Now a full iteration will work, and return one entry.
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

    // This should detect what's left of the bad entry.
    EXPECT_NE(net::OK, OpenEntry(second, &entry));
    EXPECT_EQ(2, cache_->GetEntryCount());
  }
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry9) {
  BackendInvalidEntry9(false);
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
  BackendInvalidEntry9(true);
}

TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
  SetNewEviction();
  BackendInvalidEntry9(true);
}
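
// A note on BackendInvalidEntry9: the entry's |state| is forced to a bogus
// value, and the |eviction| flag picks how the corruption is discovered:
// trimming the list (the Trim* tests pass true) or plain enumeration (false).
// Either way the damage stays contained to that entry instead of wiping the
// whole cache.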

// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // List 0: third -> second (bad).
  // List 1: first.

  if (eviction) {
    // Detection order: second -> first -> third.
    TrimForTest(false);
    EXPECT_EQ(3, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second -> first.
    // We should detect the problem through the list, but we should not delete
    // the entry.
    void* iter = NULL;
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    EXPECT_EQ(first, entry->GetKey());
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  }
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry10) {
  BackendInvalidEntry10(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
  BackendInvalidEntry10(true);
}

// Tests handling of corrupt entries detected by enumerations.
void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
  const int kSize = 0x3000;  // 12 kB.
  SetMaxSize(kSize * 10);
  SetNewEviction();
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(second, &entry));
  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(3, cache_->GetEntryCount());

  // List 0: third.
  // List 1: second (bad) -> first.

  if (eviction) {
    // Detection order: third -> first -> second.
    TrimForTest(false);
    EXPECT_EQ(2, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
    TrimForTest(false);
    EXPECT_EQ(1, cache_->GetEntryCount());
  } else {
    // Detection order: third -> second.
    // We should detect the problem through the list, but we should not delete
    // the entry, just fail the iteration.
    void* iter = NULL;
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));

    // Now a full iteration will work, and return two entries.
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
    entry->Close();
    EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  }
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidEntry11) {
  BackendInvalidEntry11(false);
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
  BackendInvalidEntry11(true);
}

// Tests handling of corrupt entries in the middle of a long eviction run.
void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
  const int kSize = 0x3000;  // 12 kB
  SetMaxSize(kSize * 10);
  InitCache();

  std::string first("some key");
  std::string second("something else");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry(second, &entry));

  // Corrupt this entry.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);

  entry_impl->entry()->Data()->state = 0xbad;
  entry_impl->entry()->Store();
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
  entry->Close();
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
  TrimForTest(true);
  EXPECT_EQ(1, cache_->GetEntryCount());
  entry->Close();
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
  BackendTrimInvalidEntry12();
}

TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
  SetNewEviction();
  BackendTrimInvalidEntry12();
}

// We want to be able to deal with messed up entries on disk.
void DiskCacheBackendTest::BackendInvalidRankings2() {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
  entry2->Close();

  // CheckCacheIntegrity will fail at this point.
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, InvalidRankings2) {
  BackendInvalidRankings2();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
  SetNewEviction();
  BackendInvalidRankings2();
}

// If the LRU is corrupt, we delete the cache.
void DiskCacheBackendTest::BackendInvalidRankings() {
  disk_cache::Entry* entry;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry));
  entry->Close();
  EXPECT_EQ(2, cache_->GetEntryCount());

  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry));
  FlushQueueForTest();  // Allow the restart to finish.
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}

TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendInvalidRankings();
}

// If the LRU is corrupt and we have open entries, we disable the cache.
void DiskCacheBackendTest::BackendDisable() {
  disk_cache::Entry *entry1, *entry2;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));

  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
  EXPECT_EQ(0, cache_->GetEntryCount());
  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));

  entry1->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, DisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable();
}

// This is another type of corruption on the LRU; disable the cache.
void DiskCacheBackendTest::BackendDisable2() {
  EXPECT_EQ(8, cache_->GetEntryCount());

  disk_cache::Entry* entry;
  void* iter = NULL;
  int count = 0;
  while (OpenNextEntry(&iter, &entry) == net::OK) {
    ASSERT_TRUE(NULL != entry);
    entry->Close();
    count++;
    ASSERT_LT(count, 9);
  }

  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, DisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
  ASSERT_TRUE(CopyTestCache("list_loop"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  SetTestMode();  // Fail cache reinitialization.
  BackendDisable2();
}

// If the index size changes when we disable the cache, we should not crash.
void DiskCacheBackendTest::BackendDisable3() {
  disk_cache::Entry *entry1, *entry2;
  void* iter = NULL;
  EXPECT_EQ(2, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));
  entry1->Close();

  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry2));
  FlushQueueForTest();

  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
  entry2->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDisable3();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDisable3();
}

// If we disable the cache, already open entries should work as far as possible.
void DiskCacheBackendTest::BackendDisable4() {
  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
  void* iter = NULL;
  ASSERT_EQ(net::OK, OpenNextEntry(&iter, &entry1));

  char key2[2000];
  char key3[20000];
  CacheTestFillBuffer(key2, sizeof(key2), true);
  CacheTestFillBuffer(key3, sizeof(key3), true);
  key2[sizeof(key2) - 1] = '\0';
  key3[sizeof(key3) - 1] = '\0';
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
  ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));

  const int kBufSize = 20000;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
  memset(buf->data(), 0, kBufSize);
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));

  // This line should disable the cache but not delete it.
  EXPECT_NE(net::OK, OpenNextEntry(&iter, &entry4));
  EXPECT_EQ(0, cache_->GetEntryCount());

  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));

  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));

  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));

  std::string key = entry2->GetKey();
  EXPECT_EQ(sizeof(key2) - 1, key.size());
  key = entry3->GetKey();
  EXPECT_EQ(sizeof(key3) - 1, key.size());

  entry1->Close();
  entry2->Close();
  entry3->Close();
  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
  FlushQueueForTest();  // This one actually allows that task to complete.

  EXPECT_EQ(0, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  InitCache();
  BackendDisable4();
}

TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
  ASSERT_TRUE(CopyTestCache("bad_rankings"));
  DisableFirstCleanup();
  SetNewEviction();
  InitCache();
  BackendDisable4();
}
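
// A note on BackendDisable4: once the backend disables itself no new entries
// can be created, but entries that were already open keep servicing ReadData,
// WriteData and GetKey until they are closed; closing them lets the posted
// restart task run.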

TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
  MessageLoopHelper helper;

  ASSERT_TRUE(CleanupCacheDir());
  scoped_ptr<disk_cache::BackendImpl> cache;
  cache.reset(new disk_cache::BackendImpl(
      cache_path_, base::MessageLoopProxy::current().get(), NULL));
  ASSERT_TRUE(NULL != cache.get());
  cache->SetUnitTestMode();
  ASSERT_EQ(net::OK, cache->SyncInit());

  // Wait for a callback that never comes... about 2 secs :). The message loop
  // has to run to allow invocation of the usage timer.
  helper.WaitUntilCacheIoFinished(1);
}

TEST_F(DiskCacheBackendTest, TimerNotCreated) {
  ASSERT_TRUE(CopyTestCache("wrong_version"));

  scoped_ptr<disk_cache::BackendImpl> cache;
  cache.reset(new disk_cache::BackendImpl(
      cache_path_, base::MessageLoopProxy::current().get(), NULL));
  ASSERT_TRUE(NULL != cache.get());
  cache->SetUnitTestMode();
  ASSERT_NE(net::OK, cache->SyncInit());

  ASSERT_TRUE(NULL == cache->GetTimerForTest());

  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
  InitCache();
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  entry->Close();
  FlushQueueForTest();

  disk_cache::StatsItems stats;
  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));

  // Now open the cache and verify that the stats are still there.
  DisableFirstCleanup();
  InitCache();
  EXPECT_EQ(1, cache_->GetEntryCount());

  cache_->GetStats(&stats);
  EXPECT_FALSE(stats.empty());

  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
}

void DiskCacheBackendTest::BackendDoomAll() {
  InitCache();

  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
  entry1->Close();
  entry2->Close();

  ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));

  ASSERT_EQ(4, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());
  ASSERT_EQ(0, cache_->GetEntryCount());

  // We should stop posting tasks at some point (if we post any).
  base::MessageLoop::current()->RunUntilIdle();

  disk_cache::Entry *entry3, *entry4;
  EXPECT_NE(net::OK, OpenEntry("third", &entry3));
  ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));

  EXPECT_EQ(net::OK, DoomAllEntries());
  ASSERT_EQ(0, cache_->GetEntryCount());

  entry1->Close();
  entry2->Close();
  entry3->Doom();  // The entry should be already doomed, but this must work.
  entry3->Close();
  entry4->Close();

  // Now try with all references released.
  ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
  entry1->Close();
  entry2->Close();

  ASSERT_EQ(2, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());
  ASSERT_EQ(0, cache_->GetEntryCount());

  EXPECT_EQ(net::OK, DoomAllEntries());
}

TEST_F(DiskCacheBackendTest, DoomAll) {
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
  SetNewEviction();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
  SetMemoryOnlyMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
  SetCacheType(net::SHADER_CACHE);
  BackendDoomAll();
}
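
// A note on BackendDoomAll: DoomAllEntries() has to work while entries are
// still open, has to leave a doomed-but-open entry usable (the explicit
// entry3->Doom() call), and has to succeed again when the cache is already
// empty.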

// If the index size changes when we doom the cache, we should not crash.
void DiskCacheBackendTest::BackendDoomAll2() {
  EXPECT_EQ(2, cache_->GetEntryCount());
  EXPECT_EQ(net::OK, DoomAllEntries());

  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
  entry->Close();

  EXPECT_EQ(1, cache_->GetEntryCount());
}

TEST_F(DiskCacheBackendTest, DoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  InitCache();
  BackendDoomAll2();
}

TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
  DisableFirstCleanup();
  SetMaxSize(20 * 1024 * 1024);
  SetNewEviction();
  InitCache();
  BackendDoomAll2();
}

// We should be able to create the same entry on multiple simultaneous instances
// of the cache.
TEST_F(DiskCacheTest, MultipleInstances) {
  base::ScopedTempDir store1, store2;
  ASSERT_TRUE(store1.CreateUniqueTempDir());
  ASSERT_TRUE(store2.CreateUniqueTempDir());

  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  net::TestCompletionCallback cb;

  const int kNumberOfCaches = 2;
  scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];

  int rv =
      disk_cache::CreateCacheBackend(net::DISK_CACHE,
                                     net::CACHE_BACKEND_DEFAULT,
                                     store1.path(),
                                     0,
                                     false,
                                     cache_thread.message_loop_proxy().get(),
                                     NULL,
                                     &cache[0],
                                     cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));
  rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
                                      net::CACHE_BACKEND_DEFAULT,
                                      store2.path(),
                                      0,
                                      false,
                                      cache_thread.message_loop_proxy().get(),
                                      NULL,
                                      &cache[1],
                                      cb.callback());
  ASSERT_EQ(net::OK, cb.GetResult(rv));

  ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);

  std::string key("the first key");
  disk_cache::Entry* entry;
  for (int i = 0; i < kNumberOfCaches; i++) {
    rv = cache[i]->CreateEntry(key, &entry, cb.callback());
    ASSERT_EQ(net::OK, cb.GetResult(rv));
    entry->Close();
  }
}

// Test the six regions of the curve that determines the max cache size.
TEST_F(DiskCacheTest, AutomaticMaxSize) {
  using disk_cache::kDefaultCacheSize;
  int64 large_size = kDefaultCacheSize;

  // Region 1: expected = available * 0.8
  EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
            disk_cache::PreferredCacheSize(large_size - 1));
  EXPECT_EQ(kDefaultCacheSize * 8 / 10,
            disk_cache::PreferredCacheSize(large_size));
  EXPECT_EQ(kDefaultCacheSize - 1,
            disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));

  // Region 2: expected = default_size
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 / 8));
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10 - 1));

  // Region 3: expected = available * 0.1
  EXPECT_EQ(kDefaultCacheSize,
            disk_cache::PreferredCacheSize(large_size * 10));
  EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
            disk_cache::PreferredCacheSize(large_size * 25 - 1));

  // Region 4: expected = default_size * 2.5
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 25));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100 - 1));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 100));
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250 - 1));

  // Region 5: expected = available * 0.1
  int64 largest_size = kDefaultCacheSize * 4;
  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
            disk_cache::PreferredCacheSize(large_size * 250));
  EXPECT_EQ(largest_size - 1,
            disk_cache::PreferredCacheSize(largest_size * 100 - 1));

  // Region 6: expected = largest possible size
  EXPECT_EQ(largest_size,
            disk_cache::PreferredCacheSize(largest_size * 100));
  EXPECT_EQ(largest_size,
            disk_cache::PreferredCacheSize(largest_size * 10000));
}
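
// For illustration only, assuming kDefaultCacheSize is 80 MiB (the constant is
// not spelled out in this test): region 1 covers volumes with less than
// roughly 100 MiB free (the cache gets 80% of that), region 2 pins the cache
// at the default up to roughly 800 MiB free, region 3 gives 10% of the free
// space, region 4 pins it at 2.5x the default, and regions 5 and 6 ramp up to
// and then cap at |largest_size|, i.e. 4x the default.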

// Tests that we can "migrate" a running instance from one experiment group to
// another.
TEST_F(DiskCacheBackendTest, Histograms) {
  InitCache();
  disk_cache::BackendImpl* backend_ = cache_impl_;  // Needed by the macro.

  for (int i = 1; i < 3; i++) {
    CACHE_UMA(HOURS, "FillupTime", i, 28);
  }
}

// Make sure that we keep the total memory used by the internal buffers under
// control.
TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, true);

  for (int i = 0; i < 10; i++) {
    // Allocate 2MB for this entry.
    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
    EXPECT_EQ(kSize,
              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
    EXPECT_EQ(kSize,
              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));

    // Delete one of the buffers and truncate the other.
    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));

    // Delete the second buffer, writing 10 bytes to disk.
    entry->Close();
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  }

  entry->Close();
  EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
}

// This test assumes at least 150MB of system memory.
TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
  InitCache();

  const int kOneMB = 1024 * 1024;
  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
  EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());

  cache_impl_->BufferDeleted(kOneMB);
  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());

  // Check the upper limit.
  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));

  for (int i = 0; i < 30; i++)
    cache_impl_->IsAllocAllowed(0, kOneMB);  // Ignore the result.

  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
}
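
// A note on TotalBuffersSize2: every IsAllocAllowed() call that returns true
// charges the requested bytes to the backend's running total (visible through
// GetTotalBuffersSize()), BufferDeleted() credits them back, and requests are
// refused once the outstanding total would cross the internal limit.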

// Tests that sharing of external files works and we are able to delete the
// files when we need to.
TEST_F(DiskCacheBackendTest, FileSharing) {
  InitCache();

  disk_cache::Addr address(0x80000001);
  ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
  base::FilePath name = cache_impl_->GetFileName(address);

  scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
  ASSERT_TRUE(file->Init(name));

#if defined(OS_WIN)
  DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
  DWORD access = GENERIC_READ | GENERIC_WRITE;
  base::win::ScopedHandle file2(CreateFile(
      name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
  EXPECT_FALSE(file2.IsValid());

  sharing |= FILE_SHARE_DELETE;
  file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
                       OPEN_EXISTING, 0, NULL));
  EXPECT_TRUE(file2.IsValid());
#endif

  EXPECT_TRUE(base::DeleteFile(name, false));

  // We should be able to use the file.
  const int kSize = 200;
  char buffer1[kSize];
  char buffer2[kSize];
  memset(buffer1, 't', kSize);
  memset(buffer2, 0, kSize);
  EXPECT_TRUE(file->Write(buffer1, kSize, 0));
  EXPECT_TRUE(file->Read(buffer2, kSize, 0));
  EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));

  EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
}
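
// A note on FileSharing: on Windows the second CreateFile() is expected to
// fail until FILE_SHARE_DELETE is added to the sharing mode; that same sharing
// is what lets DeleteFile() succeed while both handles are still open, and the
// disk_cache::File keeps reading and writing the deleted-but-open file.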

TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }

  // Ping the oldest entry.
  cache_->OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
  entry->Close();
}

TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();

  disk_cache::Entry* entry;

  for (int i = 0; i < 2; ++i) {
    std::string key = base::StringPrintf("key%d", i);
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    entry->Close();
  }

  // Ping the oldest entry.
  cache_->OnExternalCacheHit("key0");

  TrimForTest(false);

  // Make sure the older key remains.
  EXPECT_EQ(1, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
  entry->Close();
}

void DiskCacheBackendTest::TracingBackendBasics() {
  InitCache();
  cache_.reset(new disk_cache::TracingCacheBackend(cache_.Pass()));

  EXPECT_EQ(net::DISK_CACHE, cache_->GetCacheType());
  if (!simple_cache_mode_) {
    EXPECT_EQ(0, cache_->GetEntryCount());
  }

  net::TestCompletionCallback cb;
  disk_cache::Entry* entry = NULL;
  EXPECT_NE(net::OK, OpenEntry("key", &entry));
  EXPECT_TRUE(NULL == entry);

  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  EXPECT_TRUE(NULL != entry);

  disk_cache::Entry* same_entry = NULL;
  ASSERT_EQ(net::OK, OpenEntry("key", &same_entry));
  EXPECT_TRUE(NULL != same_entry);

  if (!simple_cache_mode_) {
    EXPECT_EQ(1, cache_->GetEntryCount());
  }
  entry->Close();
  same_entry->Close();
}

TEST_F(DiskCacheBackendTest, TracingBackendBasics) {
  TracingBackendBasics();
}
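
// A note on TracingBackendBasics: wrapping the backend in a
// disk_cache::TracingCacheBackend must be transparent to callers; the basic
// miss/create/open sequence and the entry counts behave exactly as they would
// against the unwrapped backend.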

// The Simple Cache backend requires a few guarantees from the filesystem like
// atomic renaming of recently open files. Those guarantees are not provided in
// general on Windows.
#if defined(OS_POSIX)

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingCreate(false);
}

TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendShutdownWithPendingFileIO(false);
}

TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendBasics();
}

TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
  SetSimpleCacheMode();
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
  SetSimpleCacheMode();
  SetCacheType(net::APP_CACHE);
  BackendKeying();
}

TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
  SetSimpleCacheMode();
  BackendSetSize();
}

// MacOS has a default open file limit of 256 files, which is incompatible with
// this simple cache test.
#if defined(OS_MACOSX)
#define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
#else
#define SIMPLE_MAYBE_MACOS(TestName) TestName
#endif

TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
  SetMaxSize(0x100000);
  SetSimpleCacheMode();
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  SetMaxSize(0x100000);
  BackendLoad();
}

TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
  SetSimpleCacheMode();
  BackendDoomRecent();
}

TEST_F(DiskCacheBackendTest, SimpleDoomBetween) {
  SetSimpleCacheMode();
  BackendDoomBetween();
}

TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  BackendDoomAll();
}

TEST_F(DiskCacheBackendTest, SimpleCacheTracingBackendBasics) {
  SetSimpleCacheMode();
  TracingBackendBasics();
  // TODO(pasko): implement integrity checking on the Simple Backend.
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
  SetSimpleCacheMode();
  InitCache();

  const char* key = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_TRUE(entry != NULL);
  entry->Close();
  entry = NULL;

  // Delete one of the files in the entry.
  base::FilePath to_delete_file = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  EXPECT_TRUE(base::PathExists(to_delete_file));
  EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));

  // Failing to open the entry should delete the rest of these files.
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));

  // Confirm the rest of the files are gone.
  for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
    base::FilePath should_be_gone_file(cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
    EXPECT_FALSE(base::PathExists(should_be_gone_file));
  }
}

TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
  SetSimpleCacheMode();
  InitCache();

  const char* key = "the first key";
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // To make sure the file creation completed we need to call open again so that
  // we block until it actually created the files.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ASSERT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // Write an invalid header for stream 0 and stream 1.
  base::FilePath entry_file1_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));

  disk_cache::SimpleFileHeader header;
  header.initial_magic_number = GG_UINT64_C(0xbadf00d);
  EXPECT_EQ(
      implicit_cast<int>(sizeof(header)),
      file_util::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
                           sizeof(header)));
  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
}
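
// A note on SimpleCacheOpenBadFile: every simple-cache entry file starts with
// a disk_cache::SimpleFileHeader, and overwriting initial_magic_number with a
// bogus value (0xbadf00d) is enough for the backend to reject the entry with
// net::ERR_FAILED on the next open.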

// Tests that the Simple Cache Backend fails to initialize with non-matching
// file structure on disk.
TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
  // Create a cache structure with the |BackendImpl|.
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  cache_.reset();

  // Check that the |SimpleBackendImpl| does not favor this structure.
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  disk_cache::SimpleBackendImpl* simple_cache =
      new disk_cache::SimpleBackendImpl(cache_path_,
                                        0,
                                        net::DISK_CACHE,
                                        cache_thread.message_loop_proxy().get(),
                                        NULL);
  net::TestCompletionCallback cb;
  int rv = simple_cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.GetResult(rv));
  delete simple_cache;
  DisableIntegrityCheck();
}

// Tests that the |BackendImpl| refuses to initialize on top of the files
// generated by the Simple Cache Backend.
TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
  // Create a cache structure with the |SimpleBackendImpl|.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry;
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
  entry->Close();
  cache_.reset();

  // Check that the |BackendImpl| does not favor this structure.
  base::Thread cache_thread("CacheThread");
  ASSERT_TRUE(cache_thread.StartWithOptions(
      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
  disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
      cache_path_, base::MessageLoopProxy::current().get(), NULL);
  cache->SetUnitTestMode();
  net::TestCompletionCallback cb;
  int rv = cache->Init(cb.callback());
  EXPECT_NE(net::OK, cb.GetResult(rv));
  delete cache;
  DisableIntegrityCheck();
}

TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
  SetSimpleCacheMode();
  BackendFixEnumerators();
}

// Tests basic functionality of the SimpleBackend implementation of the
// enumeration API.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries.
  std::set<std::string> keys_to_match(key_pool);
  void* iter = NULL;
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
  cache_->EndEnumeration(&iter);
  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());

  // Check that opening entries does not affect enumeration.
  keys_to_match = key_pool;
  iter = NULL;
  count = 0;
  disk_cache::Entry* entry_opened_before;
  ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
                                    &iter,
                                    &keys_to_match,
                                    &count));

  disk_cache::Entry* entry_opened_middle;
  ASSERT_EQ(net::OK,
            OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
  cache_->EndEnumeration(&iter);
  entry_opened_before->Close();
  entry_opened_middle->Close();

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}
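
// A note on the enumeration tests: EnumerateAndMatchKeys() walks the iterator,
// crossing every returned key off |keys_to_match| and incrementing |count|.
// A |max_to_open| of -1 means "enumerate everything", while key_pool.size()/2
// deliberately stops partway so the same iterator can be resumed later.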

// Tests that the enumerations are not affected by dooming an entry in the
// middle.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Check that enumeration returns all entries but the doomed one.
  std::set<std::string> keys_to_match(key_pool);
  void* iter = NULL;
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
                                    &iter,
                                    &keys_to_match,
                                    &count));

  std::string key_to_delete = *(keys_to_match.begin());
  DoomEntry(key_to_delete);
  keys_to_match.erase(key_to_delete);
  key_pool.erase(key_to_delete);
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
  cache_->EndEnumeration(&iter);

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}

// Tests that enumerations are not affected by corrupt files.
TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
  SetSimpleCacheMode();
  InitCache();
  std::set<std::string> key_pool;
  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));

  // Create a corrupt entry. The write/read sequence ensures that the entry will
  // have been created before corrupting the platform files, in the case of
  // optimistic operations.
  const std::string key = "the key";
  disk_cache::Entry* corrupted_entry;

  ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
  ASSERT_TRUE(corrupted_entry);
  const int kSize = 50;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  ASSERT_EQ(kSize,
            WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
  ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
  corrupted_entry->Close();

  EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
      key, cache_path_));
  EXPECT_EQ(key_pool.size() + 1,
            implicit_cast<size_t>(cache_->GetEntryCount()));

  // Check that enumeration returns all entries but the corrupt one.
  std::set<std::string> keys_to_match(key_pool);
  void* iter = NULL;
  size_t count = 0;
  ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter, &keys_to_match, &count));
  cache_->EndEnumeration(&iter);

  EXPECT_EQ(key_pool.size(), count);
  EXPECT_TRUE(keys_to_match.empty());
}

#endif  // defined(OS_POSIX)