1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/basictypes.h"
6 #include "base/file_util.h"
7 #include "base/metrics/field_trial.h"
9 #include "base/strings/string_util.h"
10 #include "base/strings/stringprintf.h"
11 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
12 #include "base/threading/platform_thread.h"
13 #include "base/threading/thread_restrictions.h"
14 #include "net/base/cache_type.h"
15 #include "net/base/io_buffer.h"
16 #include "net/base/net_errors.h"
17 #include "net/base/test_completion_callback.h"
18 #include "net/disk_cache/blockfile/backend_impl.h"
19 #include "net/disk_cache/blockfile/entry_impl.h"
20 #include "net/disk_cache/blockfile/experiments.h"
21 #include "net/disk_cache/blockfile/histogram_macros.h"
22 #include "net/disk_cache/blockfile/mapped_file.h"
23 #include "net/disk_cache/cache_util.h"
24 #include "net/disk_cache/disk_cache_test_base.h"
25 #include "net/disk_cache/disk_cache_test_util.h"
26 #include "net/disk_cache/memory/mem_backend_impl.h"
27 #include "net/disk_cache/simple/simple_backend_impl.h"
28 #include "net/disk_cache/simple/simple_entry_format.h"
29 #include "net/disk_cache/simple/simple_test_util.h"
30 #include "net/disk_cache/simple/simple_util.h"
31 #include "net/disk_cache/tracing/tracing_cache_backend.h"
32 #include "testing/gtest/include/gtest/gtest.h"
35 #include "base/win/scoped_handle.h"
38 // Provide a BackendImpl object to macros from histogram_macros.h.
39 #define CACHE_UMA_BACKEND_IMPL_OBJ backend_
45 const char kExistingEntryKey
[] = "existing entry key";
47 scoped_ptr
<disk_cache::BackendImpl
> CreateExistingEntryCache(
48 const base::Thread
& cache_thread
,
49 base::FilePath
& cache_path
) {
50 net::TestCompletionCallback cb
;
52 scoped_ptr
<disk_cache::BackendImpl
> cache(new disk_cache::BackendImpl(
53 cache_path
, cache_thread
.message_loop_proxy(), NULL
));
54 int rv
= cache
->Init(cb
.callback());
55 if (cb
.GetResult(rv
) != net::OK
)
56 return scoped_ptr
<disk_cache::BackendImpl
>();
58 disk_cache::Entry
* entry
= NULL
;
59 rv
= cache
->CreateEntry(kExistingEntryKey
, &entry
, cb
.callback());
60 if (cb
.GetResult(rv
) != net::OK
)
61 return scoped_ptr
<disk_cache::BackendImpl
>();
69 // Tests that can run with different types of caches.
70 class DiskCacheBackendTest
: public DiskCacheTestWithCache
{
72 // Some utility methods:
74 // Perform IO operations on the cache until there is pending IO.
75 int GeneratePendingIO(net::TestCompletionCallback
* cb
);
77 // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
78 // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
79 // There are 4 entries after doomed_start and 2 after doomed_end.
80 void InitSparseCache(base::Time
* doomed_start
, base::Time
* doomed_end
);
82 bool CreateSetOfRandomEntries(std::set
<std::string
>* key_pool
);
83 bool EnumerateAndMatchKeys(int max_to_open
,
85 std::set
<std::string
>* keys_to_match
,
91 void BackendShutdownWithPendingFileIO(bool fast
);
92 void BackendShutdownWithPendingIO(bool fast
);
93 void BackendShutdownWithPendingCreate(bool fast
);
94 void BackendSetSize();
97 void BackendValidEntry();
98 void BackendInvalidEntry();
99 void BackendInvalidEntryRead();
100 void BackendInvalidEntryWithLoad();
101 void BackendTrimInvalidEntry();
102 void BackendTrimInvalidEntry2();
103 void BackendEnumerations();
104 void BackendEnumerations2();
105 void BackendInvalidEntryEnumeration();
106 void BackendFixEnumerators();
107 void BackendDoomRecent();
108 void BackendDoomBetween();
109 void BackendTransaction(const std::string
& name
, int num_entries
, bool load
);
110 void BackendRecoverInsert();
111 void BackendRecoverRemove();
112 void BackendRecoverWithEviction();
113 void BackendInvalidEntry2();
114 void BackendInvalidEntry3();
115 void BackendInvalidEntry7();
116 void BackendInvalidEntry8();
117 void BackendInvalidEntry9(bool eviction
);
118 void BackendInvalidEntry10(bool eviction
);
119 void BackendInvalidEntry11(bool eviction
);
120 void BackendTrimInvalidEntry12();
121 void BackendDoomAll();
122 void BackendDoomAll2();
123 void BackendInvalidRankings();
124 void BackendInvalidRankings2();
125 void BackendDisable();
126 void BackendDisable2();
127 void BackendDisable3();
128 void BackendDisable4();
129 void TracingBackendBasics();
132 int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback
* cb
) {
133 if (!use_current_thread_
) {
135 return net::ERR_FAILED
;
138 disk_cache::Entry
* entry
;
139 int rv
= cache_
->CreateEntry("some key", &entry
, cb
->callback());
140 if (cb
->GetResult(rv
) != net::OK
)
141 return net::ERR_CACHE_CREATE_FAILURE
;
143 const int kSize
= 25000;
144 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
145 CacheTestFillBuffer(buffer
->data(), kSize
, false);
147 for (int i
= 0; i
< 10 * 1024 * 1024; i
+= 64 * 1024) {
148 // We are using the current thread as the cache thread because we want to
149 // be able to call directly this method to make sure that the OS (instead
150 // of us switching thread) is returning IO pending.
151 if (!simple_cache_mode_
) {
152 rv
= static_cast<disk_cache::EntryImpl
*>(entry
)->WriteDataImpl(
153 0, i
, buffer
.get(), kSize
, cb
->callback(), false);
155 rv
= entry
->WriteData(0, i
, buffer
.get(), kSize
, cb
->callback(), false);
158 if (rv
== net::ERR_IO_PENDING
)
161 rv
= net::ERR_FAILED
;
164 // Don't call Close() to avoid going through the queue or we'll deadlock
165 // waiting for the operation to finish.
166 if (!simple_cache_mode_
)
167 static_cast<disk_cache::EntryImpl
*>(entry
)->Release();
174 void DiskCacheBackendTest::InitSparseCache(base::Time
* doomed_start
,
175 base::Time
* doomed_end
) {
178 const int kSize
= 50;
179 // This must be greater then MemEntryImpl::kMaxSparseEntrySize.
180 const int kOffset
= 10 + 1024 * 1024;
182 disk_cache::Entry
* entry0
= NULL
;
183 disk_cache::Entry
* entry1
= NULL
;
184 disk_cache::Entry
* entry2
= NULL
;
186 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
187 CacheTestFillBuffer(buffer
->data(), kSize
, false);
189 ASSERT_EQ(net::OK
, CreateEntry("zeroth", &entry0
));
190 ASSERT_EQ(kSize
, WriteSparseData(entry0
, 0, buffer
.get(), kSize
));
192 WriteSparseData(entry0
, kOffset
+ kSize
, buffer
.get(), kSize
));
198 *doomed_start
= base::Time::Now();
200 // Order in rankings list:
201 // first_part1, first_part2, second_part1, second_part2
202 ASSERT_EQ(net::OK
, CreateEntry("first", &entry1
));
203 ASSERT_EQ(kSize
, WriteSparseData(entry1
, 0, buffer
.get(), kSize
));
205 WriteSparseData(entry1
, kOffset
+ kSize
, buffer
.get(), kSize
));
208 ASSERT_EQ(net::OK
, CreateEntry("second", &entry2
));
209 ASSERT_EQ(kSize
, WriteSparseData(entry2
, 0, buffer
.get(), kSize
));
211 WriteSparseData(entry2
, kOffset
+ kSize
, buffer
.get(), kSize
));
217 *doomed_end
= base::Time::Now();
219 // Order in rankings list:
220 // third_part1, fourth_part1, third_part2, fourth_part2
221 disk_cache::Entry
* entry3
= NULL
;
222 disk_cache::Entry
* entry4
= NULL
;
223 ASSERT_EQ(net::OK
, CreateEntry("third", &entry3
));
224 ASSERT_EQ(kSize
, WriteSparseData(entry3
, 0, buffer
.get(), kSize
));
225 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry4
));
226 ASSERT_EQ(kSize
, WriteSparseData(entry4
, 0, buffer
.get(), kSize
));
228 WriteSparseData(entry3
, kOffset
+ kSize
, buffer
.get(), kSize
));
230 WriteSparseData(entry4
, kOffset
+ kSize
, buffer
.get(), kSize
));
238 // Creates entries based on random keys. Stores these keys in |key_pool|.
239 bool DiskCacheBackendTest::CreateSetOfRandomEntries(
240 std::set
<std::string
>* key_pool
) {
241 const int kNumEntries
= 10;
243 for (int i
= 0; i
< kNumEntries
; ++i
) {
244 std::string key
= GenerateKey(true);
245 disk_cache::Entry
* entry
;
246 if (CreateEntry(key
, &entry
) != net::OK
)
248 key_pool
->insert(key
);
251 return key_pool
->size() == implicit_cast
<size_t>(cache_
->GetEntryCount());
254 // Performs iteration over the backend and checks that the keys of entries
255 // opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
256 // will be opened, if it is positive. Otherwise, iteration will continue until
257 // OpenNextEntry stops returning net::OK.
258 bool DiskCacheBackendTest::EnumerateAndMatchKeys(
261 std::set
<std::string
>* keys_to_match
,
263 disk_cache::Entry
* entry
;
265 while (OpenNextEntry(iter
, &entry
) == net::OK
) {
268 EXPECT_EQ(1U, keys_to_match
->erase(entry
->GetKey()));
271 if (max_to_open
>= 0 && implicit_cast
<int>(*count
) >= max_to_open
)
278 void DiskCacheBackendTest::BackendBasics() {
280 disk_cache::Entry
*entry1
= NULL
, *entry2
= NULL
;
281 EXPECT_NE(net::OK
, OpenEntry("the first key", &entry1
));
282 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry1
));
283 ASSERT_TRUE(NULL
!= entry1
);
287 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry1
));
288 ASSERT_TRUE(NULL
!= entry1
);
292 EXPECT_NE(net::OK
, CreateEntry("the first key", &entry1
));
293 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry1
));
294 EXPECT_NE(net::OK
, OpenEntry("some other key", &entry2
));
295 ASSERT_EQ(net::OK
, CreateEntry("some other key", &entry2
));
296 ASSERT_TRUE(NULL
!= entry1
);
297 ASSERT_TRUE(NULL
!= entry2
);
298 EXPECT_EQ(2, cache_
->GetEntryCount());
300 disk_cache::Entry
* entry3
= NULL
;
301 ASSERT_EQ(net::OK
, OpenEntry("some other key", &entry3
));
302 ASSERT_TRUE(NULL
!= entry3
);
303 EXPECT_TRUE(entry2
== entry3
);
304 EXPECT_EQ(2, cache_
->GetEntryCount());
306 EXPECT_EQ(net::OK
, DoomEntry("some other key"));
307 EXPECT_EQ(1, cache_
->GetEntryCount());
312 EXPECT_EQ(net::OK
, DoomEntry("the first key"));
313 EXPECT_EQ(0, cache_
->GetEntryCount());
315 ASSERT_EQ(net::OK
, CreateEntry("the first key", &entry1
));
316 ASSERT_EQ(net::OK
, CreateEntry("some other key", &entry2
));
319 EXPECT_EQ(net::OK
, DoomEntry("some other key"));
320 EXPECT_EQ(0, cache_
->GetEntryCount());
324 TEST_F(DiskCacheBackendTest
, Basics
) {
328 TEST_F(DiskCacheBackendTest
, NewEvictionBasics
) {
333 TEST_F(DiskCacheBackendTest
, MemoryOnlyBasics
) {
338 TEST_F(DiskCacheBackendTest
, AppCacheBasics
) {
339 SetCacheType(net::APP_CACHE
);
343 TEST_F(DiskCacheBackendTest
, ShaderCacheBasics
) {
344 SetCacheType(net::SHADER_CACHE
);
348 void DiskCacheBackendTest::BackendKeying() {
350 const char* kName1
= "the first key";
351 const char* kName2
= "the first Key";
352 disk_cache::Entry
*entry1
, *entry2
;
353 ASSERT_EQ(net::OK
, CreateEntry(kName1
, &entry1
));
355 ASSERT_EQ(net::OK
, CreateEntry(kName2
, &entry2
));
356 EXPECT_TRUE(entry1
!= entry2
) << "Case sensitive";
360 base::strlcpy(buffer
, kName1
, arraysize(buffer
));
361 ASSERT_EQ(net::OK
, OpenEntry(buffer
, &entry2
));
362 EXPECT_TRUE(entry1
== entry2
);
365 base::strlcpy(buffer
+ 1, kName1
, arraysize(buffer
) - 1);
366 ASSERT_EQ(net::OK
, OpenEntry(buffer
+ 1, &entry2
));
367 EXPECT_TRUE(entry1
== entry2
);
370 base::strlcpy(buffer
+ 3, kName1
, arraysize(buffer
) - 3);
371 ASSERT_EQ(net::OK
, OpenEntry(buffer
+ 3, &entry2
));
372 EXPECT_TRUE(entry1
== entry2
);
375 // Now verify long keys.
377 memset(buffer2
, 's', sizeof(buffer2
));
378 buffer2
[1023] = '\0';
379 ASSERT_EQ(net::OK
, CreateEntry(buffer2
, &entry2
)) << "key on block file";
383 buffer2
[19999] = '\0';
384 ASSERT_EQ(net::OK
, CreateEntry(buffer2
, &entry2
)) << "key on external file";
389 TEST_F(DiskCacheBackendTest
, Keying
) {
393 TEST_F(DiskCacheBackendTest
, NewEvictionKeying
) {
398 TEST_F(DiskCacheBackendTest
, MemoryOnlyKeying
) {
403 TEST_F(DiskCacheBackendTest
, AppCacheKeying
) {
404 SetCacheType(net::APP_CACHE
);
408 TEST_F(DiskCacheBackendTest
, ShaderCacheKeying
) {
409 SetCacheType(net::SHADER_CACHE
);
413 TEST_F(DiskCacheTest
, CreateBackend
) {
414 net::TestCompletionCallback cb
;
417 ASSERT_TRUE(CleanupCacheDir());
418 base::Thread
cache_thread("CacheThread");
419 ASSERT_TRUE(cache_thread
.StartWithOptions(
420 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
422 // Test the private factory method(s).
423 scoped_ptr
<disk_cache::Backend
> cache
;
424 cache
= disk_cache::MemBackendImpl::CreateBackend(0, NULL
);
425 ASSERT_TRUE(cache
.get());
428 // Now test the public API.
430 disk_cache::CreateCacheBackend(net::DISK_CACHE
,
431 net::CACHE_BACKEND_DEFAULT
,
435 cache_thread
.message_loop_proxy().get(),
439 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
440 ASSERT_TRUE(cache
.get());
443 rv
= disk_cache::CreateCacheBackend(net::MEMORY_CACHE
,
444 net::CACHE_BACKEND_DEFAULT
,
446 false, NULL
, NULL
, &cache
,
448 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
449 ASSERT_TRUE(cache
.get());
453 base::MessageLoop::current()->RunUntilIdle();
456 // Tests that |BackendImpl| fails to initialize with a missing file.
457 TEST_F(DiskCacheBackendTest
, CreateBackend_MissingFile
) {
458 ASSERT_TRUE(CopyTestCache("bad_entry"));
459 base::FilePath filename
= cache_path_
.AppendASCII("data_1");
460 base::DeleteFile(filename
, false);
461 base::Thread
cache_thread("CacheThread");
462 ASSERT_TRUE(cache_thread
.StartWithOptions(
463 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
464 net::TestCompletionCallback cb
;
466 bool prev
= base::ThreadRestrictions::SetIOAllowed(false);
467 scoped_ptr
<disk_cache::BackendImpl
> cache(new disk_cache::BackendImpl(
468 cache_path_
, cache_thread
.message_loop_proxy().get(), NULL
));
469 int rv
= cache
->Init(cb
.callback());
470 EXPECT_EQ(net::ERR_FAILED
, cb
.GetResult(rv
));
471 base::ThreadRestrictions::SetIOAllowed(prev
);
474 DisableIntegrityCheck();
477 TEST_F(DiskCacheBackendTest
, ExternalFiles
) {
479 // First, let's create a file on the folder.
480 base::FilePath filename
= cache_path_
.AppendASCII("f_000001");
482 const int kSize
= 50;
483 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
484 CacheTestFillBuffer(buffer1
->data(), kSize
, false);
485 ASSERT_EQ(kSize
, base::WriteFile(filename
, buffer1
->data(), kSize
));
487 // Now let's create a file with the cache.
488 disk_cache::Entry
* entry
;
489 ASSERT_EQ(net::OK
, CreateEntry("key", &entry
));
490 ASSERT_EQ(0, WriteData(entry
, 0, 20000, buffer1
.get(), 0, false));
493 // And verify that the first file is still there.
494 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
495 ASSERT_EQ(kSize
, base::ReadFile(filename
, buffer2
->data(), kSize
));
496 EXPECT_EQ(0, memcmp(buffer1
->data(), buffer2
->data(), kSize
));
499 // Tests that we deal with file-level pending operations at destruction time.
500 void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast
) {
501 ASSERT_TRUE(CleanupCacheDir());
502 uint32 flags
= disk_cache::kNoBuffering
;
504 flags
|= disk_cache::kNoRandom
;
507 CreateBackend(flags
, NULL
);
509 net::TestCompletionCallback cb
;
510 int rv
= GeneratePendingIO(&cb
);
512 // The cache destructor will see one pending operation here.
515 if (rv
== net::ERR_IO_PENDING
) {
516 if (fast
|| simple_cache_mode_
)
517 EXPECT_FALSE(cb
.have_result());
519 EXPECT_TRUE(cb
.have_result());
522 base::MessageLoop::current()->RunUntilIdle();
525 // Wait for the actual operation to complete, or we'll keep a file handle that
526 // may cause issues later. Note that on iOS systems even though this test
527 // uses a single thread, the actual IO is posted to a worker thread and the
528 // cache destructor breaks the link to reach cb when the operation completes.
529 rv
= cb
.GetResult(rv
);
533 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingFileIO
) {
534 BackendShutdownWithPendingFileIO(false);
537 // Here and below, tests that simulate crashes are not compiled in LeakSanitizer
538 // builds because they contain a lot of intentional memory leaks.
539 // The wrapper scripts used to run tests under Valgrind Memcheck will also
540 // disable these tests. See:
541 // tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
542 #if !defined(LEAK_SANITIZER)
543 // We'll be leaking from this test.
544 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingFileIO_Fast
) {
545 // The integrity test sets kNoRandom so there's a version mismatch if we don't
546 // force new eviction.
548 BackendShutdownWithPendingFileIO(true);
552 // See crbug.com/330074
554 // Tests that one cache instance is not affected by another one going away.
555 TEST_F(DiskCacheBackendTest
, MultipleInstancesWithPendingFileIO
) {
556 base::ScopedTempDir store
;
557 ASSERT_TRUE(store
.CreateUniqueTempDir());
559 net::TestCompletionCallback cb
;
560 scoped_ptr
<disk_cache::Backend
> extra_cache
;
561 int rv
= disk_cache::CreateCacheBackend(
562 net::DISK_CACHE
, net::CACHE_BACKEND_DEFAULT
, store
.path(), 0,
563 false, base::MessageLoopProxy::current().get(), NULL
,
564 &extra_cache
, cb
.callback());
565 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
566 ASSERT_TRUE(extra_cache
.get() != NULL
);
568 ASSERT_TRUE(CleanupCacheDir());
569 SetNewEviction(); // Match the expected behavior for integrity verification.
572 CreateBackend(disk_cache::kNoBuffering
, NULL
);
573 rv
= GeneratePendingIO(&cb
);
575 // cache_ has a pending operation, and extra_cache will go away.
578 if (rv
== net::ERR_IO_PENDING
)
579 EXPECT_FALSE(cb
.have_result());
581 base::MessageLoop::current()->RunUntilIdle();
583 // Wait for the actual operation to complete, or we'll keep a file handle that
584 // may cause issues later.
585 rv
= cb
.GetResult(rv
);
589 // Tests that we deal with background-thread pending operations.
590 void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast
) {
591 net::TestCompletionCallback cb
;
594 ASSERT_TRUE(CleanupCacheDir());
595 base::Thread
cache_thread("CacheThread");
596 ASSERT_TRUE(cache_thread
.StartWithOptions(
597 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
599 uint32 flags
= disk_cache::kNoBuffering
;
601 flags
|= disk_cache::kNoRandom
;
603 CreateBackend(flags
, &cache_thread
);
605 disk_cache::Entry
* entry
;
606 int rv
= cache_
->CreateEntry("some key", &entry
, cb
.callback());
607 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
611 // The cache destructor will see one pending operation here.
615 base::MessageLoop::current()->RunUntilIdle();
618 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingIO
) {
619 BackendShutdownWithPendingIO(false);
622 #if !defined(LEAK_SANITIZER)
623 // We'll be leaking from this test.
624 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingIO_Fast
) {
625 // The integrity test sets kNoRandom so there's a version mismatch if we don't
626 // force new eviction.
628 BackendShutdownWithPendingIO(true);
632 // Tests that we deal with create-type pending operations.
633 void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast
) {
634 net::TestCompletionCallback cb
;
637 ASSERT_TRUE(CleanupCacheDir());
638 base::Thread
cache_thread("CacheThread");
639 ASSERT_TRUE(cache_thread
.StartWithOptions(
640 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
642 disk_cache::BackendFlags flags
=
643 fast
? disk_cache::kNone
: disk_cache::kNoRandom
;
644 CreateBackend(flags
, &cache_thread
);
646 disk_cache::Entry
* entry
;
647 int rv
= cache_
->CreateEntry("some key", &entry
, cb
.callback());
648 ASSERT_EQ(net::ERR_IO_PENDING
, rv
);
651 EXPECT_FALSE(cb
.have_result());
654 base::MessageLoop::current()->RunUntilIdle();
657 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingCreate
) {
658 BackendShutdownWithPendingCreate(false);
661 #if !defined(LEAK_SANITIZER)
662 // We'll be leaking an entry from this test.
663 TEST_F(DiskCacheBackendTest
, ShutdownWithPendingCreate_Fast
) {
664 // The integrity test sets kNoRandom so there's a version mismatch if we don't
665 // force new eviction.
667 BackendShutdownWithPendingCreate(true);
671 // Disabled on android since this test requires cache creator to create
673 #if !defined(OS_ANDROID)
674 TEST_F(DiskCacheTest
, TruncatedIndex
) {
675 ASSERT_TRUE(CleanupCacheDir());
676 base::FilePath index
= cache_path_
.AppendASCII("index");
677 ASSERT_EQ(5, base::WriteFile(index
, "hello", 5));
679 base::Thread
cache_thread("CacheThread");
680 ASSERT_TRUE(cache_thread
.StartWithOptions(
681 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
682 net::TestCompletionCallback cb
;
684 scoped_ptr
<disk_cache::Backend
> backend
;
686 disk_cache::CreateCacheBackend(net::DISK_CACHE
,
687 net::CACHE_BACKEND_BLOCKFILE
,
691 cache_thread
.message_loop_proxy().get(),
695 ASSERT_NE(net::OK
, cb
.GetResult(rv
));
697 ASSERT_FALSE(backend
);
701 void DiskCacheBackendTest::BackendSetSize() {
702 const int cache_size
= 0x10000; // 64 kB
703 SetMaxSize(cache_size
);
706 std::string
first("some key");
707 std::string
second("something else");
708 disk_cache::Entry
* entry
;
709 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
711 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(cache_size
));
712 memset(buffer
->data(), 0, cache_size
);
713 EXPECT_EQ(cache_size
/ 10,
714 WriteData(entry
, 0, 0, buffer
.get(), cache_size
/ 10, false))
717 EXPECT_EQ(net::ERR_FAILED
,
718 WriteData(entry
, 1, 0, buffer
.get(), cache_size
/ 5, false))
719 << "file size above the limit";
721 // By doubling the total size, we make this file cacheable.
722 SetMaxSize(cache_size
* 2);
723 EXPECT_EQ(cache_size
/ 5,
724 WriteData(entry
, 1, 0, buffer
.get(), cache_size
/ 5, false));
726 // Let's fill up the cache!.
727 SetMaxSize(cache_size
* 10);
728 EXPECT_EQ(cache_size
* 3 / 4,
729 WriteData(entry
, 0, 0, buffer
.get(), cache_size
* 3 / 4, false));
733 SetMaxSize(cache_size
);
735 // The cache is 95% full.
737 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
738 EXPECT_EQ(cache_size
/ 10,
739 WriteData(entry
, 0, 0, buffer
.get(), cache_size
/ 10, false));
741 disk_cache::Entry
* entry2
;
742 ASSERT_EQ(net::OK
, CreateEntry("an extra key", &entry2
));
743 EXPECT_EQ(cache_size
/ 10,
744 WriteData(entry2
, 0, 0, buffer
.get(), cache_size
/ 10, false));
745 entry2
->Close(); // This will trigger the cache trim.
747 EXPECT_NE(net::OK
, OpenEntry(first
, &entry2
));
749 FlushQueueForTest(); // Make sure that we are done trimming the cache.
750 FlushQueueForTest(); // We may have posted two tasks to evict stuff.
753 ASSERT_EQ(net::OK
, OpenEntry(second
, &entry
));
754 EXPECT_EQ(cache_size
/ 10, entry
->GetDataSize(0));
758 TEST_F(DiskCacheBackendTest
, SetSize
) {
762 TEST_F(DiskCacheBackendTest
, NewEvictionSetSize
) {
767 TEST_F(DiskCacheBackendTest
, MemoryOnlySetSize
) {
772 void DiskCacheBackendTest::BackendLoad() {
774 int seed
= static_cast<int>(Time::Now().ToInternalValue());
777 disk_cache::Entry
* entries
[100];
778 for (int i
= 0; i
< 100; i
++) {
779 std::string key
= GenerateKey(true);
780 ASSERT_EQ(net::OK
, CreateEntry(key
, &entries
[i
]));
782 EXPECT_EQ(100, cache_
->GetEntryCount());
784 for (int i
= 0; i
< 100; i
++) {
785 int source1
= rand() % 100;
786 int source2
= rand() % 100;
787 disk_cache::Entry
* temp
= entries
[source1
];
788 entries
[source1
] = entries
[source2
];
789 entries
[source2
] = temp
;
792 for (int i
= 0; i
< 100; i
++) {
793 disk_cache::Entry
* entry
;
794 ASSERT_EQ(net::OK
, OpenEntry(entries
[i
]->GetKey(), &entry
));
795 EXPECT_TRUE(entry
== entries
[i
]);
801 EXPECT_EQ(0, cache_
->GetEntryCount());
804 TEST_F(DiskCacheBackendTest
, Load
) {
805 // Work with a tiny index table (16 entries)
807 SetMaxSize(0x100000);
811 TEST_F(DiskCacheBackendTest
, NewEvictionLoad
) {
813 // Work with a tiny index table (16 entries)
815 SetMaxSize(0x100000);
819 TEST_F(DiskCacheBackendTest
, MemoryOnlyLoad
) {
820 SetMaxSize(0x100000);
825 TEST_F(DiskCacheBackendTest
, AppCacheLoad
) {
826 SetCacheType(net::APP_CACHE
);
827 // Work with a tiny index table (16 entries)
829 SetMaxSize(0x100000);
833 TEST_F(DiskCacheBackendTest
, ShaderCacheLoad
) {
834 SetCacheType(net::SHADER_CACHE
);
835 // Work with a tiny index table (16 entries)
837 SetMaxSize(0x100000);
841 // Tests the chaining of an entry to the current head.
842 void DiskCacheBackendTest::BackendChain() {
843 SetMask(0x1); // 2-entry table.
844 SetMaxSize(0x3000); // 12 kB.
847 disk_cache::Entry
* entry
;
848 ASSERT_EQ(net::OK
, CreateEntry("The first key", &entry
));
850 ASSERT_EQ(net::OK
, CreateEntry("The Second key", &entry
));
854 TEST_F(DiskCacheBackendTest
, Chain
) {
858 TEST_F(DiskCacheBackendTest
, NewEvictionChain
) {
863 TEST_F(DiskCacheBackendTest
, AppCacheChain
) {
864 SetCacheType(net::APP_CACHE
);
868 TEST_F(DiskCacheBackendTest
, ShaderCacheChain
) {
869 SetCacheType(net::SHADER_CACHE
);
873 TEST_F(DiskCacheBackendTest
, NewEvictionTrim
) {
877 disk_cache::Entry
* entry
;
878 for (int i
= 0; i
< 100; i
++) {
879 std::string
name(base::StringPrintf("Key %d", i
));
880 ASSERT_EQ(net::OK
, CreateEntry(name
, &entry
));
883 // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
884 ASSERT_EQ(net::OK
, OpenEntry(name
, &entry
));
889 // The first eviction must come from list 1 (10% limit), the second must come
892 EXPECT_NE(net::OK
, OpenEntry("Key 0", &entry
));
894 EXPECT_NE(net::OK
, OpenEntry("Key 90", &entry
));
896 // Double check that we still have the list tails.
897 ASSERT_EQ(net::OK
, OpenEntry("Key 1", &entry
));
899 ASSERT_EQ(net::OK
, OpenEntry("Key 91", &entry
));
903 // Before looking for invalid entries, let's check a valid entry.
904 void DiskCacheBackendTest::BackendValidEntry() {
907 std::string
key("Some key");
908 disk_cache::Entry
* entry
;
909 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
911 const int kSize
= 50;
912 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
913 memset(buffer1
->data(), 0, kSize
);
914 base::strlcpy(buffer1
->data(), "And the data to save", kSize
);
915 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer1
.get(), kSize
, false));
919 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
921 scoped_refptr
<net::IOBuffer
> buffer2(new net::IOBuffer(kSize
));
922 memset(buffer2
->data(), 0, kSize
);
923 EXPECT_EQ(kSize
, ReadData(entry
, 0, 0, buffer2
.get(), kSize
));
925 EXPECT_STREQ(buffer1
->data(), buffer2
->data());
928 TEST_F(DiskCacheBackendTest
, ValidEntry
) {
932 TEST_F(DiskCacheBackendTest
, NewEvictionValidEntry
) {
937 // The same logic of the previous test (ValidEntry), but this time force the
938 // entry to be invalid, simulating a crash in the middle.
939 // We'll be leaking memory from this test.
940 void DiskCacheBackendTest::BackendInvalidEntry() {
943 std::string
key("Some key");
944 disk_cache::Entry
* entry
;
945 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
947 const int kSize
= 50;
948 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
949 memset(buffer
->data(), 0, kSize
);
950 base::strlcpy(buffer
->data(), "And the data to save", kSize
);
951 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
954 EXPECT_NE(net::OK
, OpenEntry(key
, &entry
));
955 EXPECT_EQ(0, cache_
->GetEntryCount());
958 #if !defined(LEAK_SANITIZER)
959 // We'll be leaking memory from this test.
960 TEST_F(DiskCacheBackendTest
, InvalidEntry
) {
961 BackendInvalidEntry();
964 // We'll be leaking memory from this test.
965 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry
) {
967 BackendInvalidEntry();
970 // We'll be leaking memory from this test.
971 TEST_F(DiskCacheBackendTest
, AppCacheInvalidEntry
) {
972 SetCacheType(net::APP_CACHE
);
973 BackendInvalidEntry();
976 // We'll be leaking memory from this test.
977 TEST_F(DiskCacheBackendTest
, ShaderCacheInvalidEntry
) {
978 SetCacheType(net::SHADER_CACHE
);
979 BackendInvalidEntry();
982 // Almost the same test, but this time crash the cache after reading an entry.
983 // We'll be leaking memory from this test.
984 void DiskCacheBackendTest::BackendInvalidEntryRead() {
987 std::string
key("Some key");
988 disk_cache::Entry
* entry
;
989 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
991 const int kSize
= 50;
992 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
993 memset(buffer
->data(), 0, kSize
);
994 base::strlcpy(buffer
->data(), "And the data to save", kSize
);
995 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
997 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
998 EXPECT_EQ(kSize
, ReadData(entry
, 0, 0, buffer
.get(), kSize
));
1002 if (type_
== net::APP_CACHE
) {
1003 // Reading an entry and crashing should not make it dirty.
1004 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1005 EXPECT_EQ(1, cache_
->GetEntryCount());
1008 EXPECT_NE(net::OK
, OpenEntry(key
, &entry
));
1009 EXPECT_EQ(0, cache_
->GetEntryCount());
1013 // We'll be leaking memory from this test.
1014 TEST_F(DiskCacheBackendTest
, InvalidEntryRead
) {
1015 BackendInvalidEntryRead();
1018 // We'll be leaking memory from this test.
1019 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntryRead
) {
1021 BackendInvalidEntryRead();
1024 // We'll be leaking memory from this test.
1025 TEST_F(DiskCacheBackendTest
, AppCacheInvalidEntryRead
) {
1026 SetCacheType(net::APP_CACHE
);
1027 BackendInvalidEntryRead();
1030 // We'll be leaking memory from this test.
1031 TEST_F(DiskCacheBackendTest
, ShaderCacheInvalidEntryRead
) {
1032 SetCacheType(net::SHADER_CACHE
);
1033 BackendInvalidEntryRead();
1036 // We'll be leaking memory from this test.
1037 void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
1038 // Work with a tiny index table (16 entries)
1040 SetMaxSize(0x100000);
1043 int seed
= static_cast<int>(Time::Now().ToInternalValue());
1046 const int kNumEntries
= 100;
1047 disk_cache::Entry
* entries
[kNumEntries
];
1048 for (int i
= 0; i
< kNumEntries
; i
++) {
1049 std::string key
= GenerateKey(true);
1050 ASSERT_EQ(net::OK
, CreateEntry(key
, &entries
[i
]));
1052 EXPECT_EQ(kNumEntries
, cache_
->GetEntryCount());
1054 for (int i
= 0; i
< kNumEntries
; i
++) {
1055 int source1
= rand() % kNumEntries
;
1056 int source2
= rand() % kNumEntries
;
1057 disk_cache::Entry
* temp
= entries
[source1
];
1058 entries
[source1
] = entries
[source2
];
1059 entries
[source2
] = temp
;
1062 std::string keys
[kNumEntries
];
1063 for (int i
= 0; i
< kNumEntries
; i
++) {
1064 keys
[i
] = entries
[i
]->GetKey();
1065 if (i
< kNumEntries
/ 2)
1066 entries
[i
]->Close();
1071 for (int i
= kNumEntries
/ 2; i
< kNumEntries
; i
++) {
1072 disk_cache::Entry
* entry
;
1073 EXPECT_NE(net::OK
, OpenEntry(keys
[i
], &entry
));
1076 for (int i
= 0; i
< kNumEntries
/ 2; i
++) {
1077 disk_cache::Entry
* entry
;
1078 ASSERT_EQ(net::OK
, OpenEntry(keys
[i
], &entry
));
1082 EXPECT_EQ(kNumEntries
/ 2, cache_
->GetEntryCount());
1085 // We'll be leaking memory from this test.
1086 TEST_F(DiskCacheBackendTest
, InvalidEntryWithLoad
) {
1087 BackendInvalidEntryWithLoad();
1090 // We'll be leaking memory from this test.
1091 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntryWithLoad
) {
1093 BackendInvalidEntryWithLoad();
1096 // We'll be leaking memory from this test.
1097 TEST_F(DiskCacheBackendTest
, AppCacheInvalidEntryWithLoad
) {
1098 SetCacheType(net::APP_CACHE
);
1099 BackendInvalidEntryWithLoad();
1102 // We'll be leaking memory from this test.
1103 TEST_F(DiskCacheBackendTest
, ShaderCacheInvalidEntryWithLoad
) {
1104 SetCacheType(net::SHADER_CACHE
);
1105 BackendInvalidEntryWithLoad();
1108 // We'll be leaking memory from this test.
1109 void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1110 const int kSize
= 0x3000; // 12 kB
1111 SetMaxSize(kSize
* 10);
1114 std::string
first("some key");
1115 std::string
second("something else");
1116 disk_cache::Entry
* entry
;
1117 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
1119 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1120 memset(buffer
->data(), 0, kSize
);
1121 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1123 // Simulate a crash.
1126 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
1127 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1129 EXPECT_EQ(2, cache_
->GetEntryCount());
1131 entry
->Close(); // Trim the cache.
1132 FlushQueueForTest();
1134 // If we evicted the entry in less than 20mS, we have one entry in the cache;
1135 // if it took more than that, we posted a task and we'll delete the second
1137 base::MessageLoop::current()->RunUntilIdle();
1139 // This may be not thread-safe in general, but for now it's OK so add some
1140 // ThreadSanitizer annotations to ignore data races on cache_.
1141 // See http://crbug.com/55970
1142 ANNOTATE_IGNORE_READS_BEGIN();
1143 EXPECT_GE(1, cache_
->GetEntryCount());
1144 ANNOTATE_IGNORE_READS_END();
1146 EXPECT_NE(net::OK
, OpenEntry(first
, &entry
));
1149 // We'll be leaking memory from this test.
1150 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry
) {
1151 BackendTrimInvalidEntry();
1154 // We'll be leaking memory from this test.
1155 TEST_F(DiskCacheBackendTest
, NewEvictionTrimInvalidEntry
) {
1157 BackendTrimInvalidEntry();
1160 // We'll be leaking memory from this test.
1161 void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1162 SetMask(0xf); // 16-entry table.
1164 const int kSize
= 0x3000; // 12 kB
1165 SetMaxSize(kSize
* 40);
1168 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
1169 memset(buffer
->data(), 0, kSize
);
1170 disk_cache::Entry
* entry
;
1172 // Writing 32 entries to this cache chains most of them.
1173 for (int i
= 0; i
< 32; i
++) {
1174 std::string
key(base::StringPrintf("some key %d", i
));
1175 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1176 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1178 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
1179 // Note that we are not closing the entries.
1182 // Simulate a crash.
1185 ASSERT_EQ(net::OK
, CreateEntry("Something else", &entry
));
1186 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, false));
1188 FlushQueueForTest();
1189 EXPECT_EQ(33, cache_
->GetEntryCount());
1192 // For the new eviction code, all corrupt entries are on the second list so
1193 // they are not going away that easy.
1194 if (new_eviction_
) {
1195 EXPECT_EQ(net::OK
, DoomAllEntries());
1198 entry
->Close(); // Trim the cache.
1199 FlushQueueForTest();
1201 // We may abort the eviction before cleaning up everything.
1202 base::MessageLoop::current()->RunUntilIdle();
1203 FlushQueueForTest();
1204 // If it's not clear enough: we may still have eviction tasks running at this
1205 // time, so the number of entries is changing while we read it.
1206 ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1207 EXPECT_GE(30, cache_
->GetEntryCount());
1208 ANNOTATE_IGNORE_READS_AND_WRITES_END();
1211 // We'll be leaking memory from this test.
1212 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry2
) {
1213 BackendTrimInvalidEntry2();
1216 // We'll be leaking memory from this test.
1217 TEST_F(DiskCacheBackendTest
, NewEvictionTrimInvalidEntry2
) {
1219 BackendTrimInvalidEntry2();
1221 #endif // !defined(LEAK_SANITIZER)
1223 void DiskCacheBackendTest::BackendEnumerations() {
1225 Time initial
= Time::Now();
1227 const int kNumEntries
= 100;
1228 for (int i
= 0; i
< kNumEntries
; i
++) {
1229 std::string key
= GenerateKey(true);
1230 disk_cache::Entry
* entry
;
1231 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1234 EXPECT_EQ(kNumEntries
, cache_
->GetEntryCount());
1235 Time final
= Time::Now();
1237 disk_cache::Entry
* entry
;
1240 Time last_modified
[kNumEntries
];
1241 Time last_used
[kNumEntries
];
1242 while (OpenNextEntry(&iter
, &entry
) == net::OK
) {
1243 ASSERT_TRUE(NULL
!= entry
);
1244 if (count
< kNumEntries
) {
1245 last_modified
[count
] = entry
->GetLastModified();
1246 last_used
[count
] = entry
->GetLastUsed();
1247 EXPECT_TRUE(initial
<= last_modified
[count
]);
1248 EXPECT_TRUE(final
>= last_modified
[count
]);
1254 EXPECT_EQ(kNumEntries
, count
);
1258 // The previous enumeration should not have changed the timestamps.
1259 while (OpenNextEntry(&iter
, &entry
) == net::OK
) {
1260 ASSERT_TRUE(NULL
!= entry
);
1261 if (count
< kNumEntries
) {
1262 EXPECT_TRUE(last_modified
[count
] == entry
->GetLastModified());
1263 EXPECT_TRUE(last_used
[count
] == entry
->GetLastUsed());
1268 EXPECT_EQ(kNumEntries
, count
);
1271 TEST_F(DiskCacheBackendTest
, Enumerations
) {
1272 BackendEnumerations();
1275 TEST_F(DiskCacheBackendTest
, NewEvictionEnumerations
) {
1277 BackendEnumerations();
1280 TEST_F(DiskCacheBackendTest
, MemoryOnlyEnumerations
) {
1281 SetMemoryOnlyMode();
1282 BackendEnumerations();
1285 TEST_F(DiskCacheBackendTest
, ShaderCacheEnumerations
) {
1286 SetCacheType(net::SHADER_CACHE
);
1287 BackendEnumerations();
1290 TEST_F(DiskCacheBackendTest
, AppCacheEnumerations
) {
1291 SetCacheType(net::APP_CACHE
);
1292 BackendEnumerations();
1295 // Verifies enumerations while entries are open.
1296 void DiskCacheBackendTest::BackendEnumerations2() {
1298 const std::string
first("first");
1299 const std::string
second("second");
1300 disk_cache::Entry
*entry1
, *entry2
;
1301 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry1
));
1303 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry2
));
1305 FlushQueueForTest();
1307 // Make sure that the timestamp is not the same.
1309 ASSERT_EQ(net::OK
, OpenEntry(second
, &entry1
));
1311 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry2
));
1312 EXPECT_EQ(entry2
->GetKey(), second
);
1314 // Two entries and the iterator pointing at "first".
1318 // The iterator should still be valid, so we should not crash.
1319 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry2
));
1320 EXPECT_EQ(entry2
->GetKey(), first
);
1322 cache_
->EndEnumeration(&iter
);
1324 // Modify the oldest entry and get the newest element.
1325 ASSERT_EQ(net::OK
, OpenEntry(first
, &entry1
));
1326 EXPECT_EQ(0, WriteData(entry1
, 0, 200, NULL
, 0, false));
1327 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry2
));
1328 if (type_
== net::APP_CACHE
) {
1329 // The list is not updated.
1330 EXPECT_EQ(entry2
->GetKey(), second
);
1332 EXPECT_EQ(entry2
->GetKey(), first
);
1337 cache_
->EndEnumeration(&iter
);
1340 TEST_F(DiskCacheBackendTest
, Enumerations2
) {
1341 BackendEnumerations2();
1344 TEST_F(DiskCacheBackendTest
, NewEvictionEnumerations2
) {
1346 BackendEnumerations2();
1349 TEST_F(DiskCacheBackendTest
, MemoryOnlyEnumerations2
) {
1350 SetMemoryOnlyMode();
1351 BackendEnumerations2();
1354 TEST_F(DiskCacheBackendTest
, AppCacheEnumerations2
) {
1355 SetCacheType(net::APP_CACHE
);
1356 BackendEnumerations2();
1359 TEST_F(DiskCacheBackendTest
, ShaderCacheEnumerations2
) {
1360 SetCacheType(net::SHADER_CACHE
);
1361 BackendEnumerations2();
1364 // Verify that ReadData calls do not update the LRU cache
1365 // when using the SHADER_CACHE type.
1366 TEST_F(DiskCacheBackendTest
, ShaderCacheEnumerationReadData
) {
1367 SetCacheType(net::SHADER_CACHE
);
1369 const std::string
first("first");
1370 const std::string
second("second");
1371 disk_cache::Entry
*entry1
, *entry2
;
1372 const int kSize
= 50;
1373 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
1375 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry1
));
1376 memset(buffer1
->data(), 0, kSize
);
1377 base::strlcpy(buffer1
->data(), "And the data to save", kSize
);
1378 EXPECT_EQ(kSize
, WriteData(entry1
, 0, 0, buffer1
.get(), kSize
, false));
1380 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry2
));
1383 FlushQueueForTest();
1385 // Make sure that the timestamp is not the same.
1388 // Read from the last item in the LRU.
1389 EXPECT_EQ(kSize
, ReadData(entry1
, 0, 0, buffer1
.get(), kSize
));
1393 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry2
));
1394 EXPECT_EQ(entry2
->GetKey(), second
);
1396 cache_
->EndEnumeration(&iter
);
1399 #if !defined(LEAK_SANITIZER)
1400 // Verify handling of invalid entries while doing enumerations.
1401 // We'll be leaking memory from this test.
1402 void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1405 std::string
key("Some key");
1406 disk_cache::Entry
*entry
, *entry1
, *entry2
;
1407 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry1
));
1409 const int kSize
= 50;
1410 scoped_refptr
<net::IOBuffer
> buffer1(new net::IOBuffer(kSize
));
1411 memset(buffer1
->data(), 0, kSize
);
1412 base::strlcpy(buffer1
->data(), "And the data to save", kSize
);
1413 EXPECT_EQ(kSize
, WriteData(entry1
, 0, 0, buffer1
.get(), kSize
, false));
1415 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry1
));
1416 EXPECT_EQ(kSize
, ReadData(entry1
, 0, 0, buffer1
.get(), kSize
));
1418 std::string
key2("Another key");
1419 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry2
));
1421 ASSERT_EQ(2, cache_
->GetEntryCount());
1427 while (OpenNextEntry(&iter
, &entry
) == net::OK
) {
1428 ASSERT_TRUE(NULL
!= entry
);
1429 EXPECT_EQ(key2
, entry
->GetKey());
1433 EXPECT_EQ(1, count
);
1434 EXPECT_EQ(1, cache_
->GetEntryCount());
1437 // We'll be leaking memory from this test.
1438 TEST_F(DiskCacheBackendTest
, InvalidEntryEnumeration
) {
1439 BackendInvalidEntryEnumeration();
1442 // We'll be leaking memory from this test.
1443 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntryEnumeration
) {
1445 BackendInvalidEntryEnumeration();
1447 #endif // !defined(LEAK_SANITIZER)
1449 // Tests that if for some reason entries are modified close to existing cache
1450 // iterators, we don't generate fatal errors or reset the cache.
1451 void DiskCacheBackendTest::BackendFixEnumerators() {
1454 int seed
= static_cast<int>(Time::Now().ToInternalValue());
1457 const int kNumEntries
= 10;
1458 for (int i
= 0; i
< kNumEntries
; i
++) {
1459 std::string key
= GenerateKey(true);
1460 disk_cache::Entry
* entry
;
1461 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
1464 EXPECT_EQ(kNumEntries
, cache_
->GetEntryCount());
1466 disk_cache::Entry
*entry1
, *entry2
;
1469 ASSERT_EQ(net::OK
, OpenNextEntry(&iter1
, &entry1
));
1470 ASSERT_TRUE(NULL
!= entry1
);
1474 // Let's go to the middle of the list.
1475 for (int i
= 0; i
< kNumEntries
/ 2; i
++) {
1478 ASSERT_EQ(net::OK
, OpenNextEntry(&iter1
, &entry1
));
1479 ASSERT_TRUE(NULL
!= entry1
);
1481 ASSERT_EQ(net::OK
, OpenNextEntry(&iter2
, &entry2
));
1482 ASSERT_TRUE(NULL
!= entry2
);
1486 // Messing up with entry1 will modify entry2->next.
1488 ASSERT_EQ(net::OK
, OpenNextEntry(&iter2
, &entry2
));
1489 ASSERT_TRUE(NULL
!= entry2
);
1491 // The link entry2->entry1 should be broken.
1492 EXPECT_NE(entry2
->GetKey(), entry1
->GetKey());
1496 // And the second iterator should keep working.
1497 ASSERT_EQ(net::OK
, OpenNextEntry(&iter2
, &entry2
));
1498 ASSERT_TRUE(NULL
!= entry2
);
1501 cache_
->EndEnumeration(&iter1
);
1502 cache_
->EndEnumeration(&iter2
);
1505 TEST_F(DiskCacheBackendTest
, FixEnumerators
) {
1506 BackendFixEnumerators();
1509 TEST_F(DiskCacheBackendTest
, NewEvictionFixEnumerators
) {
1511 BackendFixEnumerators();
1514 void DiskCacheBackendTest::BackendDoomRecent() {
1517 disk_cache::Entry
*entry
;
1518 ASSERT_EQ(net::OK
, CreateEntry("first", &entry
));
1520 ASSERT_EQ(net::OK
, CreateEntry("second", &entry
));
1522 FlushQueueForTest();
1525 Time middle
= Time::Now();
1527 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
1529 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry
));
1531 FlushQueueForTest();
1534 Time final
= Time::Now();
1536 ASSERT_EQ(4, cache_
->GetEntryCount());
1537 EXPECT_EQ(net::OK
, DoomEntriesSince(final
));
1538 ASSERT_EQ(4, cache_
->GetEntryCount());
1540 EXPECT_EQ(net::OK
, DoomEntriesSince(middle
));
1541 ASSERT_EQ(2, cache_
->GetEntryCount());
1543 ASSERT_EQ(net::OK
, OpenEntry("second", &entry
));
1547 TEST_F(DiskCacheBackendTest
, DoomRecent
) {
1548 BackendDoomRecent();
1551 TEST_F(DiskCacheBackendTest
, NewEvictionDoomRecent
) {
1553 BackendDoomRecent();
1556 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomRecent
) {
1557 SetMemoryOnlyMode();
1558 BackendDoomRecent();
1561 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomEntriesSinceSparse
) {
1562 SetMemoryOnlyMode();
1564 InitSparseCache(&start
, NULL
);
1565 DoomEntriesSince(start
);
1566 EXPECT_EQ(1, cache_
->GetEntryCount());
1569 TEST_F(DiskCacheBackendTest
, DoomEntriesSinceSparse
) {
1571 InitSparseCache(&start
, NULL
);
1572 DoomEntriesSince(start
);
1573 // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
1574 // MemBackendImpl does not. Thats why expected value differs here from
1575 // MemoryOnlyDoomEntriesSinceSparse.
1576 EXPECT_EQ(3, cache_
->GetEntryCount());
1579 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomAllSparse
) {
1580 SetMemoryOnlyMode();
1581 InitSparseCache(NULL
, NULL
);
1582 EXPECT_EQ(net::OK
, DoomAllEntries());
1583 EXPECT_EQ(0, cache_
->GetEntryCount());
1586 TEST_F(DiskCacheBackendTest
, DoomAllSparse
) {
1587 InitSparseCache(NULL
, NULL
);
1588 EXPECT_EQ(net::OK
, DoomAllEntries());
1589 EXPECT_EQ(0, cache_
->GetEntryCount());
1592 void DiskCacheBackendTest::BackendDoomBetween() {
1595 disk_cache::Entry
*entry
;
1596 ASSERT_EQ(net::OK
, CreateEntry("first", &entry
));
1598 FlushQueueForTest();
1601 Time middle_start
= Time::Now();
1603 ASSERT_EQ(net::OK
, CreateEntry("second", &entry
));
1605 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
1607 FlushQueueForTest();
1610 Time middle_end
= Time::Now();
1612 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry
));
1614 ASSERT_EQ(net::OK
, OpenEntry("fourth", &entry
));
1616 FlushQueueForTest();
1619 Time final
= Time::Now();
1621 ASSERT_EQ(4, cache_
->GetEntryCount());
1622 EXPECT_EQ(net::OK
, DoomEntriesBetween(middle_start
, middle_end
));
1623 ASSERT_EQ(2, cache_
->GetEntryCount());
1625 ASSERT_EQ(net::OK
, OpenEntry("fourth", &entry
));
1628 EXPECT_EQ(net::OK
, DoomEntriesBetween(middle_start
, final
));
1629 ASSERT_EQ(1, cache_
->GetEntryCount());
1631 ASSERT_EQ(net::OK
, OpenEntry("first", &entry
));
1635 TEST_F(DiskCacheBackendTest
, DoomBetween
) {
1636 BackendDoomBetween();
1639 TEST_F(DiskCacheBackendTest
, NewEvictionDoomBetween
) {
1641 BackendDoomBetween();
1644 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomBetween
) {
1645 SetMemoryOnlyMode();
1646 BackendDoomBetween();
1649 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomEntriesBetweenSparse
) {
1650 SetMemoryOnlyMode();
1651 base::Time start
, end
;
1652 InitSparseCache(&start
, &end
);
1653 DoomEntriesBetween(start
, end
);
1654 EXPECT_EQ(3, cache_
->GetEntryCount());
1657 end
= base::Time::Now();
1658 DoomEntriesBetween(start
, end
);
1659 EXPECT_EQ(1, cache_
->GetEntryCount());
1662 TEST_F(DiskCacheBackendTest
, DoomEntriesBetweenSparse
) {
1663 base::Time start
, end
;
1664 InitSparseCache(&start
, &end
);
1665 DoomEntriesBetween(start
, end
);
1666 EXPECT_EQ(9, cache_
->GetEntryCount());
1669 end
= base::Time::Now();
1670 DoomEntriesBetween(start
, end
);
1671 EXPECT_EQ(3, cache_
->GetEntryCount());
1674 void DiskCacheBackendTest::BackendTransaction(const std::string
& name
,
1675 int num_entries
, bool load
) {
1677 ASSERT_TRUE(CopyTestCache(name
));
1678 DisableFirstCleanup();
1683 SetMaxSize(0x100000);
1685 // Clear the settings from the previous run.
1692 ASSERT_EQ(num_entries
+ 1, cache_
->GetEntryCount());
1694 std::string
key("the first key");
1695 disk_cache::Entry
* entry1
;
1696 ASSERT_NE(net::OK
, OpenEntry(key
, &entry1
));
1698 int actual
= cache_
->GetEntryCount();
1699 if (num_entries
!= actual
) {
1701 // If there is a heavy load, inserting an entry will make another entry
1702 // dirty (on the hash bucket) so two entries are removed.
1703 ASSERT_EQ(num_entries
- 1, actual
);
1709 ASSERT_TRUE(CheckCacheIntegrity(cache_path_
, new_eviction_
, mask
));
1713 void DiskCacheBackendTest::BackendRecoverInsert() {
1714 // Tests with an empty cache.
1715 BackendTransaction("insert_empty1", 0, false);
1716 ASSERT_TRUE(success_
) << "insert_empty1";
1717 BackendTransaction("insert_empty2", 0, false);
1718 ASSERT_TRUE(success_
) << "insert_empty2";
1719 BackendTransaction("insert_empty3", 0, false);
1720 ASSERT_TRUE(success_
) << "insert_empty3";
1722 // Tests with one entry on the cache.
1723 BackendTransaction("insert_one1", 1, false);
1724 ASSERT_TRUE(success_
) << "insert_one1";
1725 BackendTransaction("insert_one2", 1, false);
1726 ASSERT_TRUE(success_
) << "insert_one2";
1727 BackendTransaction("insert_one3", 1, false);
1728 ASSERT_TRUE(success_
) << "insert_one3";
1730 // Tests with one hundred entries on the cache, tiny index.
1731 BackendTransaction("insert_load1", 100, true);
1732 ASSERT_TRUE(success_
) << "insert_load1";
1733 BackendTransaction("insert_load2", 100, true);
1734 ASSERT_TRUE(success_
) << "insert_load2";
1737 TEST_F(DiskCacheBackendTest
, RecoverInsert
) {
1738 BackendRecoverInsert();
1741 TEST_F(DiskCacheBackendTest
, NewEvictionRecoverInsert
) {
1743 BackendRecoverInsert();
1746 void DiskCacheBackendTest::BackendRecoverRemove() {
1747 // Removing the only element.
1748 BackendTransaction("remove_one1", 0, false);
1749 ASSERT_TRUE(success_
) << "remove_one1";
1750 BackendTransaction("remove_one2", 0, false);
1751 ASSERT_TRUE(success_
) << "remove_one2";
1752 BackendTransaction("remove_one3", 0, false);
1753 ASSERT_TRUE(success_
) << "remove_one3";
1755 // Removing the head.
1756 BackendTransaction("remove_head1", 1, false);
1757 ASSERT_TRUE(success_
) << "remove_head1";
1758 BackendTransaction("remove_head2", 1, false);
1759 ASSERT_TRUE(success_
) << "remove_head2";
1760 BackendTransaction("remove_head3", 1, false);
1761 ASSERT_TRUE(success_
) << "remove_head3";
1763 // Removing the tail.
1764 BackendTransaction("remove_tail1", 1, false);
1765 ASSERT_TRUE(success_
) << "remove_tail1";
1766 BackendTransaction("remove_tail2", 1, false);
1767 ASSERT_TRUE(success_
) << "remove_tail2";
1768 BackendTransaction("remove_tail3", 1, false);
1769 ASSERT_TRUE(success_
) << "remove_tail3";
1771 // Removing with one hundred entries on the cache, tiny index.
1772 BackendTransaction("remove_load1", 100, true);
1773 ASSERT_TRUE(success_
) << "remove_load1";
1774 BackendTransaction("remove_load2", 100, true);
1775 ASSERT_TRUE(success_
) << "remove_load2";
1776 BackendTransaction("remove_load3", 100, true);
1777 ASSERT_TRUE(success_
) << "remove_load3";
1779 // This case cannot be reverted.
1780 BackendTransaction("remove_one4", 0, false);
1781 ASSERT_TRUE(success_
) << "remove_one4";
1782 BackendTransaction("remove_head4", 1, false);
1783 ASSERT_TRUE(success_
) << "remove_head4";
1787 // http://crbug.com/396392
1788 #define MAYBE_RecoverRemove DISABLED_RecoverRemove
1790 #define MAYBE_RecoverRemove RecoverRemove
1792 TEST_F(DiskCacheBackendTest
, MAYBE_RecoverRemove
) {
1793 BackendRecoverRemove();
1797 // http://crbug.com/396392
1798 #define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
1800 #define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
1802 TEST_F(DiskCacheBackendTest
, MAYBE_NewEvictionRecoverRemove
) {
1804 BackendRecoverRemove();
1807 void DiskCacheBackendTest::BackendRecoverWithEviction() {
1809 ASSERT_TRUE(CopyTestCache("insert_load1"));
1810 DisableFirstCleanup();
1815 // We should not crash here.
1817 DisableIntegrityCheck();
1820 TEST_F(DiskCacheBackendTest
, RecoverWithEviction
) {
1821 BackendRecoverWithEviction();
1824 TEST_F(DiskCacheBackendTest
, NewEvictionRecoverWithEviction
) {
1826 BackendRecoverWithEviction();
1829 // Tests that the |BackendImpl| fails to start with the wrong cache version.
1830 TEST_F(DiskCacheTest
, WrongVersion
) {
1831 ASSERT_TRUE(CopyTestCache("wrong_version"));
1832 base::Thread
cache_thread("CacheThread");
1833 ASSERT_TRUE(cache_thread
.StartWithOptions(
1834 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1835 net::TestCompletionCallback cb
;
1837 scoped_ptr
<disk_cache::BackendImpl
> cache(new disk_cache::BackendImpl(
1838 cache_path_
, cache_thread
.message_loop_proxy().get(), NULL
));
1839 int rv
= cache
->Init(cb
.callback());
1840 ASSERT_EQ(net::ERR_FAILED
, cb
.GetResult(rv
));
1843 class BadEntropyProvider
: public base::FieldTrial::EntropyProvider
{
1845 virtual ~BadEntropyProvider() {}
1847 virtual double GetEntropyForTrial(const std::string
& trial_name
,
1848 uint32 randomization_seed
) const OVERRIDE
{
1853 // Tests that the disk cache successfully joins the control group, dropping the
1854 // existing cache in favour of a new empty cache.
1855 // Disabled on android since this test requires cache creator to create
1856 // blockfile caches.
1857 #if !defined(OS_ANDROID)
1858 TEST_F(DiskCacheTest
, SimpleCacheControlJoin
) {
1859 base::Thread
cache_thread("CacheThread");
1860 ASSERT_TRUE(cache_thread
.StartWithOptions(
1861 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1863 scoped_ptr
<disk_cache::BackendImpl
> cache
=
1864 CreateExistingEntryCache(cache_thread
, cache_path_
);
1865 ASSERT_TRUE(cache
.get());
1868 // Instantiate the SimpleCacheTrial, forcing this run into the
1869 // ExperimentControl group.
1870 base::FieldTrialList
field_trial_list(new BadEntropyProvider());
1871 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1872 "ExperimentControl");
1873 net::TestCompletionCallback cb
;
1874 scoped_ptr
<disk_cache::Backend
> base_cache
;
1876 disk_cache::CreateCacheBackend(net::DISK_CACHE
,
1877 net::CACHE_BACKEND_BLOCKFILE
,
1881 cache_thread
.message_loop_proxy().get(),
1885 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
1886 EXPECT_EQ(0, base_cache
->GetEntryCount());
1890 // Tests that the disk cache can restart in the control group preserving
1891 // existing entries.
1892 TEST_F(DiskCacheTest
, SimpleCacheControlRestart
) {
1893 // Instantiate the SimpleCacheTrial, forcing this run into the
1894 // ExperimentControl group.
1895 base::FieldTrialList
field_trial_list(new BadEntropyProvider());
1896 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1897 "ExperimentControl");
1899 base::Thread
cache_thread("CacheThread");
1900 ASSERT_TRUE(cache_thread
.StartWithOptions(
1901 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1903 scoped_ptr
<disk_cache::BackendImpl
> cache
=
1904 CreateExistingEntryCache(cache_thread
, cache_path_
);
1905 ASSERT_TRUE(cache
.get());
1907 net::TestCompletionCallback cb
;
1909 const int kRestartCount
= 5;
1910 for (int i
= 0; i
< kRestartCount
; ++i
) {
1911 cache
.reset(new disk_cache::BackendImpl(
1912 cache_path_
, cache_thread
.message_loop_proxy(), NULL
));
1913 int rv
= cache
->Init(cb
.callback());
1914 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
1915 EXPECT_EQ(1, cache
->GetEntryCount());
1917 disk_cache::Entry
* entry
= NULL
;
1918 rv
= cache
->OpenEntry(kExistingEntryKey
, &entry
, cb
.callback());
1919 EXPECT_EQ(net::OK
, cb
.GetResult(rv
));
1925 // Tests that the disk cache can leave the control group preserving existing
1927 TEST_F(DiskCacheTest
, SimpleCacheControlLeave
) {
1928 base::Thread
cache_thread("CacheThread");
1929 ASSERT_TRUE(cache_thread
.StartWithOptions(
1930 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1933 // Instantiate the SimpleCacheTrial, forcing this run into the
1934 // ExperimentControl group.
1935 base::FieldTrialList
field_trial_list(new BadEntropyProvider());
1936 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1937 "ExperimentControl");
1939 scoped_ptr
<disk_cache::BackendImpl
> cache
=
1940 CreateExistingEntryCache(cache_thread
, cache_path_
);
1941 ASSERT_TRUE(cache
.get());
1944 // Instantiate the SimpleCacheTrial, forcing this run into the
1945 // ExperimentNo group.
1946 base::FieldTrialList
field_trial_list(new BadEntropyProvider());
1947 base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1948 net::TestCompletionCallback cb
;
1950 const int kRestartCount
= 5;
1951 for (int i
= 0; i
< kRestartCount
; ++i
) {
1952 scoped_ptr
<disk_cache::BackendImpl
> cache(new disk_cache::BackendImpl(
1953 cache_path_
, cache_thread
.message_loop_proxy(), NULL
));
1954 int rv
= cache
->Init(cb
.callback());
1955 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
1956 EXPECT_EQ(1, cache
->GetEntryCount());
1958 disk_cache::Entry
* entry
= NULL
;
1959 rv
= cache
->OpenEntry(kExistingEntryKey
, &entry
, cb
.callback());
1960 EXPECT_EQ(net::OK
, cb
.GetResult(rv
));
1966 // Tests that the cache is properly restarted on recovery error.
1967 // Disabled on android since this test requires cache creator to create
1968 // blockfile caches.
1969 #if !defined(OS_ANDROID)
1970 TEST_F(DiskCacheBackendTest
, DeleteOld
) {
1971 ASSERT_TRUE(CopyTestCache("wrong_version"));
1973 base::Thread
cache_thread("CacheThread");
1974 ASSERT_TRUE(cache_thread
.StartWithOptions(
1975 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
1977 net::TestCompletionCallback cb
;
1978 bool prev
= base::ThreadRestrictions::SetIOAllowed(false);
1979 base::FilePath
path(cache_path_
);
1981 disk_cache::CreateCacheBackend(net::DISK_CACHE
,
1982 net::CACHE_BACKEND_BLOCKFILE
,
1986 cache_thread
.message_loop_proxy().get(),
1990 path
.clear(); // Make sure path was captured by the previous call.
1991 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
1992 base::ThreadRestrictions::SetIOAllowed(prev
);
1994 EXPECT_TRUE(CheckCacheIntegrity(cache_path_
, new_eviction_
, mask_
));
1998 // We want to be able to deal with messed up entries on disk.
1999 void DiskCacheBackendTest::BackendInvalidEntry2() {
2000 ASSERT_TRUE(CopyTestCache("bad_entry"));
2001 DisableFirstCleanup();
2004 disk_cache::Entry
*entry1
, *entry2
;
2005 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry1
));
2006 EXPECT_NE(net::OK
, OpenEntry("some other key", &entry2
));
2009 // CheckCacheIntegrity will fail at this point.
2010 DisableIntegrityCheck();
2013 TEST_F(DiskCacheBackendTest
, InvalidEntry2
) {
2014 BackendInvalidEntry2();
2017 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry2
) {
2019 BackendInvalidEntry2();
2022 // Tests that we don't crash or hang when enumerating this cache.
2023 void DiskCacheBackendTest::BackendInvalidEntry3() {
2024 SetMask(0x1); // 2-entry table.
2025 SetMaxSize(0x3000); // 12 kB.
2026 DisableFirstCleanup();
2029 disk_cache::Entry
* entry
;
2031 while (OpenNextEntry(&iter
, &entry
) == net::OK
) {
2036 TEST_F(DiskCacheBackendTest
, InvalidEntry3
) {
2037 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2038 BackendInvalidEntry3();
2041 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry3
) {
2042 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2044 BackendInvalidEntry3();
2045 DisableIntegrityCheck();
2048 // Test that we handle a dirty entry on the LRU list, already replaced with
2049 // the same key, and with hash collisions.
2050 TEST_F(DiskCacheBackendTest
, InvalidEntry4
) {
2051 ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2052 SetMask(0x1); // 2-entry table.
2053 SetMaxSize(0x3000); // 12 kB.
2054 DisableFirstCleanup();
2060 // Test that we handle a dirty entry on the deleted list, already replaced with
2061 // the same key, and with hash collisions.
2062 TEST_F(DiskCacheBackendTest
, InvalidEntry5
) {
2063 ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2065 SetMask(0x1); // 2-entry table.
2066 SetMaxSize(0x3000); // 12 kB.
2067 DisableFirstCleanup();
2070 TrimDeletedListForTest(false);
2073 TEST_F(DiskCacheBackendTest
, InvalidEntry6
) {
2074 ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2075 SetMask(0x1); // 2-entry table.
2076 SetMaxSize(0x3000); // 12 kB.
2077 DisableFirstCleanup();
2080 // There is a dirty entry (but marked as clean) at the end, pointing to a
2081 // deleted entry through the hash collision list. We should not re-insert the
2082 // deleted entry into the index table.
2085 // The cache should be clean (as detected by CheckCacheIntegrity).
2088 // Tests that we don't hang when there is a loop on the hash collision list.
2089 // The test cache could be a result of bug 69135.
2090 TEST_F(DiskCacheBackendTest
, BadNextEntry1
) {
2091 ASSERT_TRUE(CopyTestCache("list_loop2"));
2092 SetMask(0x1); // 2-entry table.
2093 SetMaxSize(0x3000); // 12 kB.
2094 DisableFirstCleanup();
2097 // The second entry points at itself, and the first entry is not accessible
2098 // through the index, but it is at the head of the LRU.
2100 disk_cache::Entry
* entry
;
2101 ASSERT_EQ(net::OK
, CreateEntry("The first key", &entry
));
2106 ASSERT_EQ(net::OK
, OpenEntry("The first key", &entry
));
2108 EXPECT_EQ(1, cache_
->GetEntryCount());
2111 // Tests that we don't hang when there is a loop on the hash collision list.
2112 // The test cache could be a result of bug 69135.
2113 TEST_F(DiskCacheBackendTest
, BadNextEntry2
) {
2114 ASSERT_TRUE(CopyTestCache("list_loop3"));
2115 SetMask(0x1); // 2-entry table.
2116 SetMaxSize(0x3000); // 12 kB.
2117 DisableFirstCleanup();
2120 // There is a wide loop of 5 entries.
2122 disk_cache::Entry
* entry
;
2123 ASSERT_NE(net::OK
, OpenEntry("Not present key", &entry
));
2126 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry6
) {
2127 ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2128 DisableFirstCleanup();
2132 // The second entry is dirty, but removing it should not corrupt the list.
2133 disk_cache::Entry
* entry
;
2134 ASSERT_NE(net::OK
, OpenEntry("the second key", &entry
));
2135 ASSERT_EQ(net::OK
, OpenEntry("the first key", &entry
));
2137 // This should not delete the cache.
2139 FlushQueueForTest();
2142 ASSERT_EQ(net::OK
, OpenEntry("some other key", &entry
));
2146 // Tests handling of corrupt entries by keeping the rankings node around, with
2148 void DiskCacheBackendTest::BackendInvalidEntry7() {
2149 const int kSize
= 0x3000; // 12 kB.
2150 SetMaxSize(kSize
* 10);
2153 std::string
first("some key");
2154 std::string
second("something else");
2155 disk_cache::Entry
* entry
;
2156 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2158 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2160 // Corrupt this entry.
2161 disk_cache::EntryImpl
* entry_impl
=
2162 static_cast<disk_cache::EntryImpl
*>(entry
);
2164 entry_impl
->rankings()->Data()->next
= 0;
2165 entry_impl
->rankings()->Store();
2167 FlushQueueForTest();
2168 EXPECT_EQ(2, cache_
->GetEntryCount());
2170 // This should detect the bad entry.
2171 EXPECT_NE(net::OK
, OpenEntry(second
, &entry
));
2172 EXPECT_EQ(1, cache_
->GetEntryCount());
2174 // We should delete the cache. The list still has a corrupt node.
2176 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry
));
2177 FlushQueueForTest();
2178 EXPECT_EQ(0, cache_
->GetEntryCount());
2181 TEST_F(DiskCacheBackendTest
, InvalidEntry7
) {
2182 BackendInvalidEntry7();
2185 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry7
) {
2187 BackendInvalidEntry7();
2190 // Tests handling of corrupt entries by keeping the rankings node around, with
2191 // a non fatal failure.
2192 void DiskCacheBackendTest::BackendInvalidEntry8() {
2193 const int kSize
= 0x3000; // 12 kB
2194 SetMaxSize(kSize
* 10);
2197 std::string
first("some key");
2198 std::string
second("something else");
2199 disk_cache::Entry
* entry
;
2200 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2202 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2204 // Corrupt this entry.
2205 disk_cache::EntryImpl
* entry_impl
=
2206 static_cast<disk_cache::EntryImpl
*>(entry
);
2208 entry_impl
->rankings()->Data()->contents
= 0;
2209 entry_impl
->rankings()->Store();
2211 FlushQueueForTest();
2212 EXPECT_EQ(2, cache_
->GetEntryCount());
2214 // This should detect the bad entry.
2215 EXPECT_NE(net::OK
, OpenEntry(second
, &entry
));
2216 EXPECT_EQ(1, cache_
->GetEntryCount());
2218 // We should not delete the cache.
2220 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry
));
2222 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry
));
2223 EXPECT_EQ(1, cache_
->GetEntryCount());
2226 TEST_F(DiskCacheBackendTest
, InvalidEntry8
) {
2227 BackendInvalidEntry8();
2230 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry8
) {
2232 BackendInvalidEntry8();
2235 // Tests handling of corrupt entries detected by enumerations. Note that these
2236 // tests (xx9 to xx11) are basically just going through slightly different
2237 // codepaths so they are tightly coupled with the code, but that is better than
2238 // not testing error handling code.
2239 void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction
) {
2240 const int kSize
= 0x3000; // 12 kB.
2241 SetMaxSize(kSize
* 10);
2244 std::string
first("some key");
2245 std::string
second("something else");
2246 disk_cache::Entry
* entry
;
2247 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2249 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2251 // Corrupt this entry.
2252 disk_cache::EntryImpl
* entry_impl
=
2253 static_cast<disk_cache::EntryImpl
*>(entry
);
2255 entry_impl
->entry()->Data()->state
= 0xbad;
2256 entry_impl
->entry()->Store();
2258 FlushQueueForTest();
2259 EXPECT_EQ(2, cache_
->GetEntryCount());
2263 EXPECT_EQ(1, cache_
->GetEntryCount());
2265 EXPECT_EQ(1, cache_
->GetEntryCount());
2267 // We should detect the problem through the list, but we should not delete
2268 // the entry, just fail the iteration.
2270 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry
));
2272 // Now a full iteration will work, and return one entry.
2273 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry
));
2275 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry
));
2277 // This should detect what's left of the bad entry.
2278 EXPECT_NE(net::OK
, OpenEntry(second
, &entry
));
2279 EXPECT_EQ(2, cache_
->GetEntryCount());
2281 DisableIntegrityCheck();
2284 TEST_F(DiskCacheBackendTest
, InvalidEntry9
) {
2285 BackendInvalidEntry9(false);
2288 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidEntry9
) {
2290 BackendInvalidEntry9(false);
2293 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry9
) {
2294 BackendInvalidEntry9(true);
2297 TEST_F(DiskCacheBackendTest
, NewEvictionTrimInvalidEntry9
) {
2299 BackendInvalidEntry9(true);
2302 // Tests handling of corrupt entries detected by enumerations.
2303 void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction
) {
2304 const int kSize
= 0x3000; // 12 kB.
2305 SetMaxSize(kSize
* 10);
2309 std::string
first("some key");
2310 std::string
second("something else");
2311 disk_cache::Entry
* entry
;
2312 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2314 ASSERT_EQ(net::OK
, OpenEntry(first
, &entry
));
2315 EXPECT_EQ(0, WriteData(entry
, 0, 200, NULL
, 0, false));
2317 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2319 // Corrupt this entry.
2320 disk_cache::EntryImpl
* entry_impl
=
2321 static_cast<disk_cache::EntryImpl
*>(entry
);
2323 entry_impl
->entry()->Data()->state
= 0xbad;
2324 entry_impl
->entry()->Store();
2326 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
2328 EXPECT_EQ(3, cache_
->GetEntryCount());
2331 // List 0: third -> second (bad).
2335 // Detection order: second -> first -> third.
2337 EXPECT_EQ(3, cache_
->GetEntryCount());
2339 EXPECT_EQ(2, cache_
->GetEntryCount());
2341 EXPECT_EQ(1, cache_
->GetEntryCount());
2343 // Detection order: third -> second -> first.
2344 // We should detect the problem through the list, but we should not delete
2347 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry
));
2349 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry
));
2350 EXPECT_EQ(first
, entry
->GetKey());
2352 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry
));
2354 DisableIntegrityCheck();
2357 TEST_F(DiskCacheBackendTest
, InvalidEntry10
) {
2358 BackendInvalidEntry10(false);
2361 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry10
) {
2362 BackendInvalidEntry10(true);
2365 // Tests handling of corrupt entries detected by enumerations.
2366 void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction
) {
2367 const int kSize
= 0x3000; // 12 kB.
2368 SetMaxSize(kSize
* 10);
2372 std::string
first("some key");
2373 std::string
second("something else");
2374 disk_cache::Entry
* entry
;
2375 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2377 ASSERT_EQ(net::OK
, OpenEntry(first
, &entry
));
2378 EXPECT_EQ(0, WriteData(entry
, 0, 200, NULL
, 0, false));
2380 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2382 ASSERT_EQ(net::OK
, OpenEntry(second
, &entry
));
2383 EXPECT_EQ(0, WriteData(entry
, 0, 200, NULL
, 0, false));
2385 // Corrupt this entry.
2386 disk_cache::EntryImpl
* entry_impl
=
2387 static_cast<disk_cache::EntryImpl
*>(entry
);
2389 entry_impl
->entry()->Data()->state
= 0xbad;
2390 entry_impl
->entry()->Store();
2392 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
2394 FlushQueueForTest();
2395 EXPECT_EQ(3, cache_
->GetEntryCount());
2399 // List 1: second (bad) -> first.
2402 // Detection order: third -> first -> second.
2404 EXPECT_EQ(2, cache_
->GetEntryCount());
2406 EXPECT_EQ(1, cache_
->GetEntryCount());
2408 EXPECT_EQ(1, cache_
->GetEntryCount());
2410 // Detection order: third -> second.
2411 // We should detect the problem through the list, but we should not delete
2412 // the entry, just fail the iteration.
2414 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry
));
2416 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry
));
2418 // Now a full iteration will work, and return two entries.
2419 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry
));
2421 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry
));
2423 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry
));
2425 DisableIntegrityCheck();
2428 TEST_F(DiskCacheBackendTest
, InvalidEntry11
) {
2429 BackendInvalidEntry11(false);
2432 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry11
) {
2433 BackendInvalidEntry11(true);
2436 // Tests handling of corrupt entries in the middle of a long eviction run.
2437 void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2438 const int kSize
= 0x3000; // 12 kB
2439 SetMaxSize(kSize
* 10);
2442 std::string
first("some key");
2443 std::string
second("something else");
2444 disk_cache::Entry
* entry
;
2445 ASSERT_EQ(net::OK
, CreateEntry(first
, &entry
));
2447 ASSERT_EQ(net::OK
, CreateEntry(second
, &entry
));
2449 // Corrupt this entry.
2450 disk_cache::EntryImpl
* entry_impl
=
2451 static_cast<disk_cache::EntryImpl
*>(entry
);
2453 entry_impl
->entry()->Data()->state
= 0xbad;
2454 entry_impl
->entry()->Store();
2456 ASSERT_EQ(net::OK
, CreateEntry("third", &entry
));
2458 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry
));
2460 EXPECT_EQ(1, cache_
->GetEntryCount());
2462 DisableIntegrityCheck();
2465 TEST_F(DiskCacheBackendTest
, TrimInvalidEntry12
) {
2466 BackendTrimInvalidEntry12();
2469 TEST_F(DiskCacheBackendTest
, NewEvictionTrimInvalidEntry12
) {
2471 BackendTrimInvalidEntry12();
2474 // We want to be able to deal with messed up entries on disk.
2475 void DiskCacheBackendTest::BackendInvalidRankings2() {
2476 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2477 DisableFirstCleanup();
2480 disk_cache::Entry
*entry1
, *entry2
;
2481 EXPECT_NE(net::OK
, OpenEntry("the first key", &entry1
));
2482 ASSERT_EQ(net::OK
, OpenEntry("some other key", &entry2
));
2485 // CheckCacheIntegrity will fail at this point.
2486 DisableIntegrityCheck();
2489 TEST_F(DiskCacheBackendTest
, InvalidRankings2
) {
2490 BackendInvalidRankings2();
2493 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidRankings2
) {
2495 BackendInvalidRankings2();
2498 // If the LRU is corrupt, we delete the cache.
2499 void DiskCacheBackendTest::BackendInvalidRankings() {
2500 disk_cache::Entry
* entry
;
2502 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry
));
2504 EXPECT_EQ(2, cache_
->GetEntryCount());
2506 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry
));
2507 FlushQueueForTest(); // Allow the restart to finish.
2508 EXPECT_EQ(0, cache_
->GetEntryCount());
2511 TEST_F(DiskCacheBackendTest
, InvalidRankingsSuccess
) {
2512 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2513 DisableFirstCleanup();
2515 BackendInvalidRankings();
2518 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidRankingsSuccess
) {
2519 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2520 DisableFirstCleanup();
2523 BackendInvalidRankings();
2526 TEST_F(DiskCacheBackendTest
, InvalidRankingsFailure
) {
2527 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2528 DisableFirstCleanup();
2530 SetTestMode(); // Fail cache reinitialization.
2531 BackendInvalidRankings();
2534 TEST_F(DiskCacheBackendTest
, NewEvictionInvalidRankingsFailure
) {
2535 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2536 DisableFirstCleanup();
2539 SetTestMode(); // Fail cache reinitialization.
2540 BackendInvalidRankings();
2543 // If the LRU is corrupt and we have open entries, we disable the cache.
2544 void DiskCacheBackendTest::BackendDisable() {
2545 disk_cache::Entry
*entry1
, *entry2
;
2547 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry1
));
2549 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry2
));
2550 EXPECT_EQ(0, cache_
->GetEntryCount());
2551 EXPECT_NE(net::OK
, CreateEntry("Something new", &entry2
));
2554 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2555 FlushQueueForTest(); // This one actually allows that task to complete.
2557 EXPECT_EQ(0, cache_
->GetEntryCount());
2560 TEST_F(DiskCacheBackendTest
, DisableSuccess
) {
2561 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2562 DisableFirstCleanup();
2567 TEST_F(DiskCacheBackendTest
, NewEvictionDisableSuccess
) {
2568 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2569 DisableFirstCleanup();
2575 TEST_F(DiskCacheBackendTest
, DisableFailure
) {
2576 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2577 DisableFirstCleanup();
2579 SetTestMode(); // Fail cache reinitialization.
2583 TEST_F(DiskCacheBackendTest
, NewEvictionDisableFailure
) {
2584 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2585 DisableFirstCleanup();
2588 SetTestMode(); // Fail cache reinitialization.
2592 // This is another type of corruption on the LRU; disable the cache.
2593 void DiskCacheBackendTest::BackendDisable2() {
2594 EXPECT_EQ(8, cache_
->GetEntryCount());
2596 disk_cache::Entry
* entry
;
2599 while (OpenNextEntry(&iter
, &entry
) == net::OK
) {
2600 ASSERT_TRUE(NULL
!= entry
);
2603 ASSERT_LT(count
, 9);
2606 FlushQueueForTest();
2607 EXPECT_EQ(0, cache_
->GetEntryCount());
2610 TEST_F(DiskCacheBackendTest
, DisableSuccess2
) {
2611 ASSERT_TRUE(CopyTestCache("list_loop"));
2612 DisableFirstCleanup();
2617 TEST_F(DiskCacheBackendTest
, NewEvictionDisableSuccess2
) {
2618 ASSERT_TRUE(CopyTestCache("list_loop"));
2619 DisableFirstCleanup();
2625 TEST_F(DiskCacheBackendTest
, DisableFailure2
) {
2626 ASSERT_TRUE(CopyTestCache("list_loop"));
2627 DisableFirstCleanup();
2629 SetTestMode(); // Fail cache reinitialization.
2633 TEST_F(DiskCacheBackendTest
, NewEvictionDisableFailure2
) {
2634 ASSERT_TRUE(CopyTestCache("list_loop"));
2635 DisableFirstCleanup();
2638 SetTestMode(); // Fail cache reinitialization.
2642 // If the index size changes when we disable the cache, we should not crash.
2643 void DiskCacheBackendTest::BackendDisable3() {
2644 disk_cache::Entry
*entry1
, *entry2
;
2646 EXPECT_EQ(2, cache_
->GetEntryCount());
2647 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry1
));
2650 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry2
));
2651 FlushQueueForTest();
2653 ASSERT_EQ(net::OK
, CreateEntry("Something new", &entry2
));
2656 EXPECT_EQ(1, cache_
->GetEntryCount());
2659 TEST_F(DiskCacheBackendTest
, DisableSuccess3
) {
2660 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2661 DisableFirstCleanup();
2662 SetMaxSize(20 * 1024 * 1024);
2667 TEST_F(DiskCacheBackendTest
, NewEvictionDisableSuccess3
) {
2668 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2669 DisableFirstCleanup();
2670 SetMaxSize(20 * 1024 * 1024);
2676 // If we disable the cache, already open entries should work as far as possible.
2677 void DiskCacheBackendTest::BackendDisable4() {
2678 disk_cache::Entry
*entry1
, *entry2
, *entry3
, *entry4
;
2680 ASSERT_EQ(net::OK
, OpenNextEntry(&iter
, &entry1
));
2684 CacheTestFillBuffer(key2
, sizeof(key2
), true);
2685 CacheTestFillBuffer(key3
, sizeof(key3
), true);
2686 key2
[sizeof(key2
) - 1] = '\0';
2687 key3
[sizeof(key3
) - 1] = '\0';
2688 ASSERT_EQ(net::OK
, CreateEntry(key2
, &entry2
));
2689 ASSERT_EQ(net::OK
, CreateEntry(key3
, &entry3
));
2691 const int kBufSize
= 20000;
2692 scoped_refptr
<net::IOBuffer
> buf(new net::IOBuffer(kBufSize
));
2693 memset(buf
->data(), 0, kBufSize
);
2694 EXPECT_EQ(100, WriteData(entry2
, 0, 0, buf
.get(), 100, false));
2695 EXPECT_EQ(kBufSize
, WriteData(entry3
, 0, 0, buf
.get(), kBufSize
, false));
2697 // This line should disable the cache but not delete it.
2698 EXPECT_NE(net::OK
, OpenNextEntry(&iter
, &entry4
));
2699 EXPECT_EQ(0, cache_
->GetEntryCount());
2701 EXPECT_NE(net::OK
, CreateEntry("cache is disabled", &entry4
));
2703 EXPECT_EQ(100, ReadData(entry2
, 0, 0, buf
.get(), 100));
2704 EXPECT_EQ(100, WriteData(entry2
, 0, 0, buf
.get(), 100, false));
2705 EXPECT_EQ(100, WriteData(entry2
, 1, 0, buf
.get(), 100, false));
2707 EXPECT_EQ(kBufSize
, ReadData(entry3
, 0, 0, buf
.get(), kBufSize
));
2708 EXPECT_EQ(kBufSize
, WriteData(entry3
, 0, 0, buf
.get(), kBufSize
, false));
2709 EXPECT_EQ(kBufSize
, WriteData(entry3
, 1, 0, buf
.get(), kBufSize
, false));
2711 std::string key
= entry2
->GetKey();
2712 EXPECT_EQ(sizeof(key2
) - 1, key
.size());
2713 key
= entry3
->GetKey();
2714 EXPECT_EQ(sizeof(key3
) - 1, key
.size());
2719 FlushQueueForTest(); // Flushing the Close posts a task to restart the cache.
2720 FlushQueueForTest(); // This one actually allows that task to complete.
2722 EXPECT_EQ(0, cache_
->GetEntryCount());
2725 TEST_F(DiskCacheBackendTest
, DisableSuccess4
) {
2726 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2727 DisableFirstCleanup();
2732 TEST_F(DiskCacheBackendTest
, NewEvictionDisableSuccess4
) {
2733 ASSERT_TRUE(CopyTestCache("bad_rankings"));
2734 DisableFirstCleanup();
2740 TEST_F(DiskCacheTest
, Backend_UsageStatsTimer
) {
2741 MessageLoopHelper helper
;
2743 ASSERT_TRUE(CleanupCacheDir());
2744 scoped_ptr
<disk_cache::BackendImpl
> cache
;
2745 cache
.reset(new disk_cache::BackendImpl(
2746 cache_path_
, base::MessageLoopProxy::current().get(), NULL
));
2747 ASSERT_TRUE(NULL
!= cache
.get());
2748 cache
->SetUnitTestMode();
2749 ASSERT_EQ(net::OK
, cache
->SyncInit());
2751 // Wait for a callback that never comes... about 2 secs :). The message loop
2752 // has to run to allow invocation of the usage timer.
2753 helper
.WaitUntilCacheIoFinished(1);
2756 TEST_F(DiskCacheBackendTest
, TimerNotCreated
) {
2757 ASSERT_TRUE(CopyTestCache("wrong_version"));
2759 scoped_ptr
<disk_cache::BackendImpl
> cache
;
2760 cache
.reset(new disk_cache::BackendImpl(
2761 cache_path_
, base::MessageLoopProxy::current().get(), NULL
));
2762 ASSERT_TRUE(NULL
!= cache
.get());
2763 cache
->SetUnitTestMode();
2764 ASSERT_NE(net::OK
, cache
->SyncInit());
2766 ASSERT_TRUE(NULL
== cache
->GetTimerForTest());
2768 DisableIntegrityCheck();
2771 TEST_F(DiskCacheBackendTest
, Backend_UsageStats
) {
2773 disk_cache::Entry
* entry
;
2774 ASSERT_EQ(net::OK
, CreateEntry("key", &entry
));
2776 FlushQueueForTest();
2778 disk_cache::StatsItems stats
;
2779 cache_
->GetStats(&stats
);
2780 EXPECT_FALSE(stats
.empty());
2782 disk_cache::StatsItems::value_type
hits("Create hit", "0x1");
2783 EXPECT_EQ(1, std::count(stats
.begin(), stats
.end(), hits
));
2787 // Now open the cache and verify that the stats are still there.
2788 DisableFirstCleanup();
2790 EXPECT_EQ(1, cache_
->GetEntryCount());
2793 cache_
->GetStats(&stats
);
2794 EXPECT_FALSE(stats
.empty());
2796 EXPECT_EQ(1, std::count(stats
.begin(), stats
.end(), hits
));
2799 void DiskCacheBackendTest::BackendDoomAll() {
2802 disk_cache::Entry
*entry1
, *entry2
;
2803 ASSERT_EQ(net::OK
, CreateEntry("first", &entry1
));
2804 ASSERT_EQ(net::OK
, CreateEntry("second", &entry2
));
2808 ASSERT_EQ(net::OK
, CreateEntry("third", &entry1
));
2809 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry2
));
2811 ASSERT_EQ(4, cache_
->GetEntryCount());
2812 EXPECT_EQ(net::OK
, DoomAllEntries());
2813 ASSERT_EQ(0, cache_
->GetEntryCount());
2815 // We should stop posting tasks at some point (if we post any).
2816 base::MessageLoop::current()->RunUntilIdle();
2818 disk_cache::Entry
*entry3
, *entry4
;
2819 EXPECT_NE(net::OK
, OpenEntry("third", &entry3
));
2820 ASSERT_EQ(net::OK
, CreateEntry("third", &entry3
));
2821 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry4
));
2823 EXPECT_EQ(net::OK
, DoomAllEntries());
2824 ASSERT_EQ(0, cache_
->GetEntryCount());
2828 entry3
->Doom(); // The entry should be already doomed, but this must work.
2832 // Now try with all references released.
2833 ASSERT_EQ(net::OK
, CreateEntry("third", &entry1
));
2834 ASSERT_EQ(net::OK
, CreateEntry("fourth", &entry2
));
2838 ASSERT_EQ(2, cache_
->GetEntryCount());
2839 EXPECT_EQ(net::OK
, DoomAllEntries());
2840 ASSERT_EQ(0, cache_
->GetEntryCount());
2842 EXPECT_EQ(net::OK
, DoomAllEntries());
2845 TEST_F(DiskCacheBackendTest
, DoomAll
) {
2849 TEST_F(DiskCacheBackendTest
, NewEvictionDoomAll
) {
2854 TEST_F(DiskCacheBackendTest
, MemoryOnlyDoomAll
) {
2855 SetMemoryOnlyMode();
2859 TEST_F(DiskCacheBackendTest
, AppCacheOnlyDoomAll
) {
2860 SetCacheType(net::APP_CACHE
);
2864 TEST_F(DiskCacheBackendTest
, ShaderCacheOnlyDoomAll
) {
2865 SetCacheType(net::SHADER_CACHE
);
2869 // If the index size changes when we doom the cache, we should not crash.
2870 void DiskCacheBackendTest::BackendDoomAll2() {
2871 EXPECT_EQ(2, cache_
->GetEntryCount());
2872 EXPECT_EQ(net::OK
, DoomAllEntries());
2874 disk_cache::Entry
* entry
;
2875 ASSERT_EQ(net::OK
, CreateEntry("Something new", &entry
));
2878 EXPECT_EQ(1, cache_
->GetEntryCount());
2881 TEST_F(DiskCacheBackendTest
, DoomAll2
) {
2882 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2883 DisableFirstCleanup();
2884 SetMaxSize(20 * 1024 * 1024);
2889 TEST_F(DiskCacheBackendTest
, NewEvictionDoomAll2
) {
2890 ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2891 DisableFirstCleanup();
2892 SetMaxSize(20 * 1024 * 1024);
2898 // We should be able to create the same entry on multiple simultaneous instances
2900 TEST_F(DiskCacheTest
, MultipleInstances
) {
2901 base::ScopedTempDir store1
, store2
;
2902 ASSERT_TRUE(store1
.CreateUniqueTempDir());
2903 ASSERT_TRUE(store2
.CreateUniqueTempDir());
2905 base::Thread
cache_thread("CacheThread");
2906 ASSERT_TRUE(cache_thread
.StartWithOptions(
2907 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
2908 net::TestCompletionCallback cb
;
2910 const int kNumberOfCaches
= 2;
2911 scoped_ptr
<disk_cache::Backend
> cache
[kNumberOfCaches
];
2914 disk_cache::CreateCacheBackend(net::DISK_CACHE
,
2915 net::CACHE_BACKEND_DEFAULT
,
2919 cache_thread
.message_loop_proxy().get(),
2923 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
2924 rv
= disk_cache::CreateCacheBackend(net::MEDIA_CACHE
,
2925 net::CACHE_BACKEND_DEFAULT
,
2929 cache_thread
.message_loop_proxy().get(),
2933 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
2935 ASSERT_TRUE(cache
[0].get() != NULL
&& cache
[1].get() != NULL
);
2937 std::string
key("the first key");
2938 disk_cache::Entry
* entry
;
2939 for (int i
= 0; i
< kNumberOfCaches
; i
++) {
2940 rv
= cache
[i
]->CreateEntry(key
, &entry
, cb
.callback());
2941 ASSERT_EQ(net::OK
, cb
.GetResult(rv
));
2946 // Test the six regions of the curve that determines the max cache size.
2947 TEST_F(DiskCacheTest
, AutomaticMaxSize
) {
2948 using disk_cache::kDefaultCacheSize
;
2949 int64 large_size
= kDefaultCacheSize
;
2951 // Region 1: expected = available * 0.8
2952 EXPECT_EQ((kDefaultCacheSize
- 1) * 8 / 10,
2953 disk_cache::PreferredCacheSize(large_size
- 1));
2954 EXPECT_EQ(kDefaultCacheSize
* 8 / 10,
2955 disk_cache::PreferredCacheSize(large_size
));
2956 EXPECT_EQ(kDefaultCacheSize
- 1,
2957 disk_cache::PreferredCacheSize(large_size
* 10 / 8 - 1));
2959 // Region 2: expected = default_size
2960 EXPECT_EQ(kDefaultCacheSize
,
2961 disk_cache::PreferredCacheSize(large_size
* 10 / 8));
2962 EXPECT_EQ(kDefaultCacheSize
,
2963 disk_cache::PreferredCacheSize(large_size
* 10 - 1));
2965 // Region 3: expected = available * 0.1
2966 EXPECT_EQ(kDefaultCacheSize
,
2967 disk_cache::PreferredCacheSize(large_size
* 10));
2968 EXPECT_EQ((kDefaultCacheSize
* 25 - 1) / 10,
2969 disk_cache::PreferredCacheSize(large_size
* 25 - 1));
2971 // Region 4: expected = default_size * 2.5
2972 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2973 disk_cache::PreferredCacheSize(large_size
* 25));
2974 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2975 disk_cache::PreferredCacheSize(large_size
* 100 - 1));
2976 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2977 disk_cache::PreferredCacheSize(large_size
* 100));
2978 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2979 disk_cache::PreferredCacheSize(large_size
* 250 - 1));
2981 // Region 5: expected = available * 0.1
2982 int64 largest_size
= kDefaultCacheSize
* 4;
2983 EXPECT_EQ(kDefaultCacheSize
* 25 / 10,
2984 disk_cache::PreferredCacheSize(large_size
* 250));
2985 EXPECT_EQ(largest_size
- 1,
2986 disk_cache::PreferredCacheSize(largest_size
* 100 - 1));
2988 // Region 6: expected = largest possible size
2989 EXPECT_EQ(largest_size
,
2990 disk_cache::PreferredCacheSize(largest_size
* 100));
2991 EXPECT_EQ(largest_size
,
2992 disk_cache::PreferredCacheSize(largest_size
* 10000));
2995 // Tests that we can "migrate" a running instance from one experiment group to
2997 TEST_F(DiskCacheBackendTest
, Histograms
) {
2999 disk_cache::BackendImpl
* backend_
= cache_impl_
; // Needed be the macro.
3001 for (int i
= 1; i
< 3; i
++) {
3002 CACHE_UMA(HOURS
, "FillupTime", i
, 28);
3006 // Make sure that we keep the total memory used by the internal buffers under
3008 TEST_F(DiskCacheBackendTest
, TotalBuffersSize1
) {
3010 std::string
key("the first key");
3011 disk_cache::Entry
* entry
;
3012 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3014 const int kSize
= 200;
3015 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
3016 CacheTestFillBuffer(buffer
->data(), kSize
, true);
3018 for (int i
= 0; i
< 10; i
++) {
3020 // Allocate 2MB for this entry.
3021 EXPECT_EQ(kSize
, WriteData(entry
, 0, 0, buffer
.get(), kSize
, true));
3022 EXPECT_EQ(kSize
, WriteData(entry
, 1, 0, buffer
.get(), kSize
, true));
3024 WriteData(entry
, 0, 1024 * 1024, buffer
.get(), kSize
, false));
3026 WriteData(entry
, 1, 1024 * 1024, buffer
.get(), kSize
, false));
3028 // Delete one of the buffers and truncate the other.
3029 EXPECT_EQ(0, WriteData(entry
, 0, 0, buffer
.get(), 0, true));
3030 EXPECT_EQ(0, WriteData(entry
, 1, 10, buffer
.get(), 0, true));
3032 // Delete the second buffer, writing 10 bytes to disk.
3034 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3038 EXPECT_EQ(0, cache_impl_
->GetTotalBuffersSize());
3041 // This test assumes at least 150MB of system memory.
3042 TEST_F(DiskCacheBackendTest
, TotalBuffersSize2
) {
3045 const int kOneMB
= 1024 * 1024;
3046 EXPECT_TRUE(cache_impl_
->IsAllocAllowed(0, kOneMB
));
3047 EXPECT_EQ(kOneMB
, cache_impl_
->GetTotalBuffersSize());
3049 EXPECT_TRUE(cache_impl_
->IsAllocAllowed(0, kOneMB
));
3050 EXPECT_EQ(kOneMB
* 2, cache_impl_
->GetTotalBuffersSize());
3052 EXPECT_TRUE(cache_impl_
->IsAllocAllowed(0, kOneMB
));
3053 EXPECT_EQ(kOneMB
* 3, cache_impl_
->GetTotalBuffersSize());
3055 cache_impl_
->BufferDeleted(kOneMB
);
3056 EXPECT_EQ(kOneMB
* 2, cache_impl_
->GetTotalBuffersSize());
3058 // Check the upper limit.
3059 EXPECT_FALSE(cache_impl_
->IsAllocAllowed(0, 30 * kOneMB
));
3061 for (int i
= 0; i
< 30; i
++)
3062 cache_impl_
->IsAllocAllowed(0, kOneMB
); // Ignore the result.
3064 EXPECT_FALSE(cache_impl_
->IsAllocAllowed(0, kOneMB
));
3067 // Tests that sharing of external files works and we are able to delete the
3068 // files when we need to.
3069 TEST_F(DiskCacheBackendTest
, FileSharing
) {
3072 disk_cache::Addr
address(0x80000001);
3073 ASSERT_TRUE(cache_impl_
->CreateExternalFile(&address
));
3074 base::FilePath name
= cache_impl_
->GetFileName(address
);
3076 scoped_refptr
<disk_cache::File
> file(new disk_cache::File(false));
3080 DWORD sharing
= FILE_SHARE_READ
| FILE_SHARE_WRITE
;
3081 DWORD access
= GENERIC_READ
| GENERIC_WRITE
;
3082 base::win::ScopedHandle
file2(CreateFile(
3083 name
.value().c_str(), access
, sharing
, NULL
, OPEN_EXISTING
, 0, NULL
));
3084 EXPECT_FALSE(file2
.IsValid());
3086 sharing
|= FILE_SHARE_DELETE
;
3087 file2
.Set(CreateFile(name
.value().c_str(), access
, sharing
, NULL
,
3088 OPEN_EXISTING
, 0, NULL
));
3089 EXPECT_TRUE(file2
.IsValid());
3092 EXPECT_TRUE(base::DeleteFile(name
, false));
3094 // We should be able to use the file.
3095 const int kSize
= 200;
3096 char buffer1
[kSize
];
3097 char buffer2
[kSize
];
3098 memset(buffer1
, 't', kSize
);
3099 memset(buffer2
, 0, kSize
);
3100 EXPECT_TRUE(file
->Write(buffer1
, kSize
, 0));
3101 EXPECT_TRUE(file
->Read(buffer2
, kSize
, 0));
3102 EXPECT_EQ(0, memcmp(buffer1
, buffer2
, kSize
));
3104 EXPECT_TRUE(disk_cache::DeleteCacheFile(name
));
3107 TEST_F(DiskCacheBackendTest
, UpdateRankForExternalCacheHit
) {
3110 disk_cache::Entry
* entry
;
3112 for (int i
= 0; i
< 2; ++i
) {
3113 std::string key
= base::StringPrintf("key%d", i
);
3114 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3118 // Ping the oldest entry.
3119 cache_
->OnExternalCacheHit("key0");
3123 // Make sure the older key remains.
3124 EXPECT_EQ(1, cache_
->GetEntryCount());
3125 ASSERT_EQ(net::OK
, OpenEntry("key0", &entry
));
3129 TEST_F(DiskCacheBackendTest
, ShaderCacheUpdateRankForExternalCacheHit
) {
3130 SetCacheType(net::SHADER_CACHE
);
3133 disk_cache::Entry
* entry
;
3135 for (int i
= 0; i
< 2; ++i
) {
3136 std::string key
= base::StringPrintf("key%d", i
);
3137 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3141 // Ping the oldest entry.
3142 cache_
->OnExternalCacheHit("key0");
3146 // Make sure the older key remains.
3147 EXPECT_EQ(1, cache_
->GetEntryCount());
3148 ASSERT_EQ(net::OK
, OpenEntry("key0", &entry
));
3152 void DiskCacheBackendTest::TracingBackendBasics() {
3154 cache_
.reset(new disk_cache::TracingCacheBackend(cache_
.Pass()));
3156 EXPECT_EQ(net::DISK_CACHE
, cache_
->GetCacheType());
3157 if (!simple_cache_mode_
) {
3158 EXPECT_EQ(0, cache_
->GetEntryCount());
3161 net::TestCompletionCallback cb
;
3162 disk_cache::Entry
* entry
= NULL
;
3163 EXPECT_NE(net::OK
, OpenEntry("key", &entry
));
3164 EXPECT_TRUE(NULL
== entry
);
3166 ASSERT_EQ(net::OK
, CreateEntry("key", &entry
));
3167 EXPECT_TRUE(NULL
!= entry
);
3169 disk_cache::Entry
* same_entry
= NULL
;
3170 ASSERT_EQ(net::OK
, OpenEntry("key", &same_entry
));
3171 EXPECT_TRUE(NULL
!= same_entry
);
3173 if (!simple_cache_mode_
) {
3174 EXPECT_EQ(1, cache_
->GetEntryCount());
3178 same_entry
->Close();
3182 TEST_F(DiskCacheBackendTest
, TracingBackendBasics
) {
3183 TracingBackendBasics();
3186 // The Simple Cache backend requires a few guarantees from the filesystem like
3187 // atomic renaming of recently open files. Those guarantees are not provided in
3188 // general on Windows.
3189 #if defined(OS_POSIX)
3191 TEST_F(DiskCacheBackendTest
, SimpleCacheShutdownWithPendingCreate
) {
3192 SetCacheType(net::APP_CACHE
);
3193 SetSimpleCacheMode();
3194 BackendShutdownWithPendingCreate(false);
3197 TEST_F(DiskCacheBackendTest
, SimpleCacheShutdownWithPendingFileIO
) {
3198 SetCacheType(net::APP_CACHE
);
3199 SetSimpleCacheMode();
3200 BackendShutdownWithPendingFileIO(false);
3203 TEST_F(DiskCacheBackendTest
, SimpleCacheBasics
) {
3204 SetSimpleCacheMode();
3208 TEST_F(DiskCacheBackendTest
, SimpleCacheAppCacheBasics
) {
3209 SetCacheType(net::APP_CACHE
);
3210 SetSimpleCacheMode();
3214 TEST_F(DiskCacheBackendTest
, SimpleCacheKeying
) {
3215 SetSimpleCacheMode();
3219 TEST_F(DiskCacheBackendTest
, SimpleCacheAppCacheKeying
) {
3220 SetSimpleCacheMode();
3221 SetCacheType(net::APP_CACHE
);
3225 TEST_F(DiskCacheBackendTest
, DISABLED_SimpleCacheSetSize
) {
3226 SetSimpleCacheMode();
3230 // MacOS has a default open file limit of 256 files, which is incompatible with
3231 // this simple cache test.
3232 #if defined(OS_MACOSX)
3233 #define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3235 #define SIMPLE_MAYBE_MACOS(TestName) TestName
3238 TEST_F(DiskCacheBackendTest
, SIMPLE_MAYBE_MACOS(SimpleCacheLoad
)) {
3239 SetMaxSize(0x100000);
3240 SetSimpleCacheMode();
3244 TEST_F(DiskCacheBackendTest
, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad
)) {
3245 SetCacheType(net::APP_CACHE
);
3246 SetSimpleCacheMode();
3247 SetMaxSize(0x100000);
3251 TEST_F(DiskCacheBackendTest
, SimpleDoomRecent
) {
3252 SetSimpleCacheMode();
3253 BackendDoomRecent();
3256 // crbug.com/330926, crbug.com/370677
3257 TEST_F(DiskCacheBackendTest
, DISABLED_SimpleDoomBetween
) {
3258 SetSimpleCacheMode();
3259 BackendDoomBetween();
3262 TEST_F(DiskCacheBackendTest
, SimpleCacheDoomAll
) {
3263 SetSimpleCacheMode();
3267 TEST_F(DiskCacheBackendTest
, SimpleCacheAppCacheOnlyDoomAll
) {
3268 SetCacheType(net::APP_CACHE
);
3269 SetSimpleCacheMode();
3273 TEST_F(DiskCacheBackendTest
, SimpleCacheTracingBackendBasics
) {
3274 SetSimpleCacheMode();
3275 TracingBackendBasics();
3276 // TODO(pasko): implement integrity checking on the Simple Backend.
3277 DisableIntegrityCheck();
3280 TEST_F(DiskCacheBackendTest
, SimpleCacheOpenMissingFile
) {
3281 SetSimpleCacheMode();
3284 const char* key
= "the first key";
3285 disk_cache::Entry
* entry
= NULL
;
3287 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3288 ASSERT_TRUE(entry
!= NULL
);
3292 // To make sure the file creation completed we need to call open again so that
3293 // we block until it actually created the files.
3294 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3295 ASSERT_TRUE(entry
!= NULL
);
3299 // Delete one of the files in the entry.
3300 base::FilePath to_delete_file
= cache_path_
.AppendASCII(
3301 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, 0));
3302 EXPECT_TRUE(base::PathExists(to_delete_file
));
3303 EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file
));
3305 // Failing to open the entry should delete the rest of these files.
3306 ASSERT_EQ(net::ERR_FAILED
, OpenEntry(key
, &entry
));
3308 // Confirm the rest of the files are gone.
3309 for (int i
= 1; i
< disk_cache::kSimpleEntryFileCount
; ++i
) {
3310 base::FilePath
should_be_gone_file(cache_path_
.AppendASCII(
3311 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, i
)));
3312 EXPECT_FALSE(base::PathExists(should_be_gone_file
));
3316 TEST_F(DiskCacheBackendTest
, SimpleCacheOpenBadFile
) {
3317 SetSimpleCacheMode();
3320 const char* key
= "the first key";
3321 disk_cache::Entry
* entry
= NULL
;
3323 ASSERT_EQ(net::OK
, CreateEntry(key
, &entry
));
3324 disk_cache::Entry
* null
= NULL
;
3325 ASSERT_NE(null
, entry
);
3329 // To make sure the file creation completed we need to call open again so that
3330 // we block until it actually created the files.
3331 ASSERT_EQ(net::OK
, OpenEntry(key
, &entry
));
3332 ASSERT_NE(null
, entry
);
3336 // Write an invalid header for stream 0 and stream 1.
3337 base::FilePath entry_file1_path
= cache_path_
.AppendASCII(
3338 disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key
, 0));
3340 disk_cache::SimpleFileHeader header
;
3341 header
.initial_magic_number
= GG_UINT64_C(0xbadf00d);
3343 implicit_cast
<int>(sizeof(header
)),
3344 base::WriteFile(entry_file1_path
, reinterpret_cast<char*>(&header
),
3346 ASSERT_EQ(net::ERR_FAILED
, OpenEntry(key
, &entry
));
3349 // Tests that the Simple Cache Backend fails to initialize with non-matching
3350 // file structure on disk.
3351 TEST_F(DiskCacheBackendTest
, SimpleCacheOverBlockfileCache
) {
3352 // Create a cache structure with the |BackendImpl|.
3354 disk_cache::Entry
* entry
;
3355 const int kSize
= 50;
3356 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
3357 CacheTestFillBuffer(buffer
->data(), kSize
, false);
3358 ASSERT_EQ(net::OK
, CreateEntry("key", &entry
));
3359 ASSERT_EQ(0, WriteData(entry
, 0, 0, buffer
.get(), 0, false));
3363 // Check that the |SimpleBackendImpl| does not favor this structure.
3364 base::Thread
cache_thread("CacheThread");
3365 ASSERT_TRUE(cache_thread
.StartWithOptions(
3366 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
3367 disk_cache::SimpleBackendImpl
* simple_cache
=
3368 new disk_cache::SimpleBackendImpl(cache_path_
,
3371 cache_thread
.message_loop_proxy().get(),
3373 net::TestCompletionCallback cb
;
3374 int rv
= simple_cache
->Init(cb
.callback());
3375 EXPECT_NE(net::OK
, cb
.GetResult(rv
));
3376 delete simple_cache
;
3377 DisableIntegrityCheck();
3380 // Tests that the |BackendImpl| refuses to initialize on top of the files
3381 // generated by the Simple Cache Backend.
3382 TEST_F(DiskCacheBackendTest
, BlockfileCacheOverSimpleCache
) {
3383 // Create a cache structure with the |SimpleBackendImpl|.
3384 SetSimpleCacheMode();
3386 disk_cache::Entry
* entry
;
3387 const int kSize
= 50;
3388 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
3389 CacheTestFillBuffer(buffer
->data(), kSize
, false);
3390 ASSERT_EQ(net::OK
, CreateEntry("key", &entry
));
3391 ASSERT_EQ(0, WriteData(entry
, 0, 0, buffer
.get(), 0, false));
3395 // Check that the |BackendImpl| does not favor this structure.
3396 base::Thread
cache_thread("CacheThread");
3397 ASSERT_TRUE(cache_thread
.StartWithOptions(
3398 base::Thread::Options(base::MessageLoop::TYPE_IO
, 0)));
3399 disk_cache::BackendImpl
* cache
= new disk_cache::BackendImpl(
3400 cache_path_
, base::MessageLoopProxy::current().get(), NULL
);
3401 cache
->SetUnitTestMode();
3402 net::TestCompletionCallback cb
;
3403 int rv
= cache
->Init(cb
.callback());
3404 EXPECT_NE(net::OK
, cb
.GetResult(rv
));
3406 DisableIntegrityCheck();
3409 TEST_F(DiskCacheBackendTest
, SimpleCacheFixEnumerators
) {
3410 SetSimpleCacheMode();
3411 BackendFixEnumerators();
3414 // Tests basic functionality of the SimpleBackend implementation of the
3416 TEST_F(DiskCacheBackendTest
, SimpleCacheEnumerationBasics
) {
3417 SetSimpleCacheMode();
3419 std::set
<std::string
> key_pool
;
3420 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool
));
3422 // Check that enumeration returns all entries.
3423 std::set
<std::string
> keys_to_match(key_pool
);
3426 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter
, &keys_to_match
, &count
));
3427 cache_
->EndEnumeration(&iter
);
3428 EXPECT_EQ(key_pool
.size(), count
);
3429 EXPECT_TRUE(keys_to_match
.empty());
3431 // Check that opening entries does not affect enumeration.
3432 keys_to_match
= key_pool
;
3435 disk_cache::Entry
* entry_opened_before
;
3436 ASSERT_EQ(net::OK
, OpenEntry(*(key_pool
.begin()), &entry_opened_before
));
3437 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool
.size()/2,
3442 disk_cache::Entry
* entry_opened_middle
;
3444 OpenEntry(*(keys_to_match
.begin()), &entry_opened_middle
));
3445 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter
, &keys_to_match
, &count
));
3446 cache_
->EndEnumeration(&iter
);
3447 entry_opened_before
->Close();
3448 entry_opened_middle
->Close();
3450 EXPECT_EQ(key_pool
.size(), count
);
3451 EXPECT_TRUE(keys_to_match
.empty());
3454 // Tests that the enumerations are not affected by dooming an entry in the
3456 TEST_F(DiskCacheBackendTest
, SimpleCacheEnumerationWhileDoomed
) {
3457 SetSimpleCacheMode();
3459 std::set
<std::string
> key_pool
;
3460 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool
));
3462 // Check that enumeration returns all entries but the doomed one.
3463 std::set
<std::string
> keys_to_match(key_pool
);
3466 ASSERT_TRUE(EnumerateAndMatchKeys(key_pool
.size()/2,
3471 std::string key_to_delete
= *(keys_to_match
.begin());
3472 DoomEntry(key_to_delete
);
3473 keys_to_match
.erase(key_to_delete
);
3474 key_pool
.erase(key_to_delete
);
3475 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter
, &keys_to_match
, &count
));
3476 cache_
->EndEnumeration(&iter
);
3478 EXPECT_EQ(key_pool
.size(), count
);
3479 EXPECT_TRUE(keys_to_match
.empty());
3482 // Tests that enumerations are not affected by corrupt files.
3483 TEST_F(DiskCacheBackendTest
, SimpleCacheEnumerationCorruption
) {
3484 SetSimpleCacheMode();
3486 std::set
<std::string
> key_pool
;
3487 ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool
));
3489 // Create a corrupt entry. The write/read sequence ensures that the entry will
3490 // have been created before corrupting the platform files, in the case of
3491 // optimistic operations.
3492 const std::string key
= "the key";
3493 disk_cache::Entry
* corrupted_entry
;
3495 ASSERT_EQ(net::OK
, CreateEntry(key
, &corrupted_entry
));
3496 ASSERT_TRUE(corrupted_entry
);
3497 const int kSize
= 50;
3498 scoped_refptr
<net::IOBuffer
> buffer(new net::IOBuffer(kSize
));
3499 CacheTestFillBuffer(buffer
->data(), kSize
, false);
3501 WriteData(corrupted_entry
, 0, 0, buffer
.get(), kSize
, false));
3502 ASSERT_EQ(kSize
, ReadData(corrupted_entry
, 0, 0, buffer
.get(), kSize
));
3503 corrupted_entry
->Close();
3505 EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3507 EXPECT_EQ(key_pool
.size() + 1,
3508 implicit_cast
<size_t>(cache_
->GetEntryCount()));
3510 // Check that enumeration returns all entries but the corrupt one.
3511 std::set
<std::string
> keys_to_match(key_pool
);
3514 ASSERT_TRUE(EnumerateAndMatchKeys(-1, &iter
, &keys_to_match
, &count
));
3515 cache_
->EndEnumeration(&iter
);
3517 EXPECT_EQ(key_pool
.size(), count
);
3518 EXPECT_TRUE(keys_to_match
.empty());
3521 #endif // defined(OS_POSIX)