// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/http/mock_http_cache.h"

#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "net/base/completion_callback.h"
#include "net/base/net_errors.h"
#include "testing/gtest/include/gtest/gtest.h"

// We can override the test mode for a given operation by setting this global
// variable.
int g_test_mode = 0;

int GetTestModeForEntry(const std::string& key) {
  // 'key' is prefixed with an identifier if it corresponds to a cached POST.
  // Skip past that to locate the actual URL.
  //
  // TODO(darin): It breaks the abstraction a bit that we assume 'key' is a
  // URL corresponding to a registered MockTransaction.  It would be good to
  // have another way to access the test_mode.
  GURL url;
  if (isdigit(key[0])) {
    size_t slash = key.find('/');
    DCHECK(slash != std::string::npos);
    url = GURL(key.substr(slash + 1));
  } else {
    url = GURL(key);
  }
  const MockTransaction* t = FindMockTransaction(url);
  DCHECK(t);
  return t->test_mode;
}

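// Illustrative sketch (not from this file): because GetTestModeForEntry()
// assumes the cache key names a registered MockTransaction, a test usually
// controls an entry's test mode by registering the transaction up front.
// AddMockTransaction()/RemoveMockTransaction() and kSimpleGET_Transaction
// come from the mock transaction test utilities and are used here only as an
// example:
//
//   MockTransaction transaction(kSimpleGET_Transaction);
//   transaction.test_mode = TEST_MODE_SYNC_CACHE_START |
//                           TEST_MODE_SYNC_CACHE_READ;
//   AddMockTransaction(&transaction);
//   // ... run the request; entries keyed by transaction.url use this mode.
//   RemoveMockTransaction(&transaction);
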
void CallbackForwader(const CompletionCallback& callback, int result) {
  callback.Run(result);
}

//-----------------------------------------------------------------------------

struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  CompletionCallback callback;
  int result;
};

MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), doomed_(false), sparse_(false),
      fail_requests_(false), fail_sparse_requests_(false), busy_(false),
      delayed_(false) {
  test_mode_ = GetTestModeForEntry(key);
}

void MockDiskEntry::Doom() {
  doomed_ = true;
}

void MockDiskEntry::Close() {
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::FromInternalValue(0);
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::FromInternalValue(0);
}

int32 MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32>(data_[index].size());
}

int MockDiskEntry::ReadData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            const CompletionCallback& callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;
  if (static_cast<size_t>(offset) == data_[index].size())
    return 0;

  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  return ERR_IO_PENDING;
}

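// Illustrative sketch (assumption, not part of this mock): when the entry's
// test mode includes TEST_MODE_SYNC_CACHE_READ, ReadData() completes inline
// and returns the byte count; otherwise it returns ERR_IO_PENDING and the
// result is delivered through the callback on the message loop:
//
//   scoped_refptr<IOBuffer> buf(new IOBuffer(1024));
//   TestCompletionCallback cb;
//   int rv = entry->ReadData(1, 0, buf.get(), 1024, cb.callback());
//   rv = cb.GetResult(rv);  // Only blocks if the call was asynchronous.
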
int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_) {
    CallbackLater(callback, ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}

int MockDiskEntry::ReadSparseData(int64 offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
                     buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  busy_ = true;
  delayed_ = false;
  return ERR_IO_PENDING;
}

int MockDiskEntry::WriteSparseData(int64 offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (busy_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    if (data_[1].size())
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len)
    data_[1].resize(real_offset + buf_len);

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}

int MockDiskEntry::GetAvailableRange(int64 offset,
                                     int len,
                                     int64* start,
                                     const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(callback, count);
  return ERR_IO_PENDING;
}

bool MockDiskEntry::CouldBeSparse() const {
  if (fail_sparse_requests_)
    return false;
  return sparse_;
}

void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}

int MockDiskEntry::ReadyForSparseIO(const CompletionCallback& callback) {
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass).  Just notify the caller that it finished.
  CallbackLater(callback, 0);
  return ERR_IO_PENDING;
}

// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false.  Caution: remember to enable callbacks
// again or all subsequent tests will fail.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  if (!value)
    StoreAndDeliverCallbacks(false, NULL, CompletionCallback(), 0);
}

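// Illustrative sketch (assumption): a test that needs several operations to
// stay pending can bracket them with IgnoreCallbacks(); forgetting to
// re-enable delivery will break every test that runs afterwards:
//
//   MockDiskEntry::IgnoreCallbacks(true);
//   // ... start reads/writes; completions are stored instead of delivered.
//   MockDiskEntry::IgnoreCallbacks(false);  // Flushes the stored callbacks.
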
MockDiskEntry::~MockDiskEntry() {
}

// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry.  We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(const CompletionCallback& callback,
                                  int result) {
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, callback, result);
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&MockDiskEntry::RunCallback, this, callback, result));
}

void MockDiskEntry::RunCallback(const CompletionCallback& callback,
                                int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated.  What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots).  So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(callback, result);
    }
  }
  busy_ = false;
  callback.Run(result);
}

// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
                                             MockDiskEntry* entry,
                                             const CompletionCallback& callback,
                                             int result) {
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    CallbackInfo c = {entry, callback, result};
    callback_list.push_back(c);
  } else {
    for (size_t i = 0; i < callback_list.size(); i++) {
      CallbackInfo& c = callback_list[i];
      c.entry->CallbackLater(c.callback, c.result);
    }
    callback_list.clear();
  }
}

bool MockDiskEntry::cancel_ = false;
bool MockDiskEntry::ignore_callbacks_ = false;

//-----------------------------------------------------------------------------

MockDiskCache::MockDiskCache()
    : open_count_(0), create_count_(0), fail_requests_(false),
      soft_failures_(false), double_create_check_(true),
      fail_sparse_requests_(false) {
}

MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}

CacheType MockDiskCache::GetCacheType() const {
  return DISK_CACHE;
}

int32 MockDiskCache::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}

int MockDiskCache::OpenEntry(const std::string& key,
                             disk_cache::Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_OPEN_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return ERR_CACHE_OPEN_FAILURE;

  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return ERR_CACHE_OPEN_FAILURE;
  }

  open_count_++;

  it->second->AddRef();
  *entry = it->second;

  if (soft_failures_)
    it->second->set_fail_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::CreateEntry(const std::string& key,
                               disk_cache::Entry** entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_CREATE_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return ERR_CACHE_CREATE_FAILURE;
    }
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  new_entry->AddRef();
  entries_[key] = new_entry;

  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_)
    new_entry->set_fail_requests();

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::DoomEntry(const std::string& key,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::DoomAllEntries(const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                      const base::Time end_time,
                                      const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                    const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  int OpenNextEntry(disk_cache::Entry** next_entry,
                    const CompletionCallback& callback) override {
    return ERR_NOT_IMPLEMENTED;
  }
};

scoped_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return scoped_ptr<Iterator>(new NotImplementedIterator());
}

void MockDiskCache::GetStats(base::StringPairs* stats) {
}

void MockDiskCache::OnExternalCacheHit(const std::string& key) {
}

void MockDiskCache::ReleaseAll() {
  EntryMap::iterator it = entries_.begin();
  for (; it != entries_.end(); ++it)
    it->second->Release();
  entries_.clear();
}

void MockDiskCache::CallbackLater(const CompletionCallback& callback,
                                  int result) {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&CallbackForwader, callback, result));
}

//-----------------------------------------------------------------------------

int MockBackendFactory::CreateBackend(NetLog* net_log,
                                      scoped_ptr<disk_cache::Backend>* backend,
                                      const CompletionCallback& callback) {
  backend->reset(new MockDiskCache());
  return OK;
}

//-----------------------------------------------------------------------------

MockHttpCache::MockHttpCache()
    : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
}

MockHttpCache::MockHttpCache(HttpCache::BackendFactory* disk_cache_factory)
    : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
}

disk_cache::Backend* MockHttpCache::backend() {
  TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == OK) ? backend : NULL;
}

MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}

int MockHttpCache::CreateTransaction(scoped_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}

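// Illustrative sketch (assumption): typical test setup with this helper.
//
//   MockHttpCache cache;
//   scoped_ptr<HttpTransaction> trans;
//   EXPECT_EQ(OK, cache.CreateTransaction(&trans));
//   // |trans| now runs against MockNetworkLayer and MockDiskCache.
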
void MockHttpCache::BypassCacheLock() {
  http_cache_.BypassLockForTest();
}

void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}

bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  scoped_refptr<IOBuffer> buffer(new IOBuffer(size));
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->data(), size, response_info,
                                      response_truncated);
}

bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  scoped_refptr<WrappedIOBuffer> data(
      new WrappedIOBuffer(reinterpret_cast<const char*>(pickle.data())));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}

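// Illustrative sketch (assumption): WriteResponseInfo() and ReadResponseInfo()
// are meant to round-trip a response through data stream 0 of an entry.  The
// key must name a registered MockTransaction, since MockDiskCache derives the
// entry's test mode from it; |response| stands for an HttpResponseInfo the
// test has prepared:
//
//   disk_cache::Entry* entry = NULL;
//   ASSERT_TRUE(cache.CreateBackendEntry(kSimpleGET_Transaction.url, &entry,
//                                        NULL));
//   EXPECT_TRUE(MockHttpCache::WriteResponseInfo(entry, &response, false,
//                                                false));
//   HttpResponseInfo read_back;
//   bool truncated = false;
//   EXPECT_TRUE(MockHttpCache::ReadResponseInfo(entry, &read_back, &truncated));
//   entry->Close();
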
bool MockHttpCache::OpenBackendEntry(const std::string& key,
                                     disk_cache::Entry** entry) {
  TestCompletionCallback cb;
  int rv = backend()->OpenEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

bool MockHttpCache::CreateBackendEntry(const std::string& key,
                                       disk_cache::Entry** entry,
                                       NetLog* net_log) {
  TestCompletionCallback cb;
  int rv = backend()->CreateEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

int MockHttpCache::GetTestMode(int test_mode) {
  if (!g_test_mode)
    return test_mode;

  return g_test_mode;
}

void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}

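// Illustrative sketch (assumption): the global override lets a test force
// cache behavior for every operation, regardless of each transaction's own
// test_mode.  Remember to restore the default afterwards:
//
//   MockHttpCache::SetTestMode(TEST_MODE_SYNC_CACHE_START |
//                              TEST_MODE_SYNC_CACHE_READ |
//                              TEST_MODE_SYNC_CACHE_WRITE);
//   // ... run the request ...
//   MockHttpCache::SetTestMode(0);  // Back to per-transaction modes.
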
//-----------------------------------------------------------------------------

int MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                   disk_cache::Entry** entry,
                                   const CompletionCallback& callback) {
  return ERR_IO_PENDING;
}

//-----------------------------------------------------------------------------

int MockBackendNoCbFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  backend->reset(new MockDiskCacheNoCB());
  return OK;
}

//-----------------------------------------------------------------------------

MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(NULL),
      block_(true),
      fail_(false) {
}

MockBlockingBackendFactory::~MockBlockingBackendFactory() {
}

int MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  backend_ = backend;
  callback_ = callback;
  return ERR_IO_PENDING;
}

void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    CompletionCallback cb = callback_;
    callback_.Reset();
    cb.Run(Result());  // This object can be deleted here.
  }
}

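// Illustrative sketch (assumption): tests use this factory to hold backend
// creation pending and release it at a controlled point:
//
//   MockBlockingBackendFactory* factory = new MockBlockingBackendFactory();
//   MockHttpCache cache(factory);  // The cache takes ownership of |factory|.
//   // Start a transaction; it stalls waiting for the backend to be created.
//   factory->FinishCreation();     // Runs the stored callback with Result().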