// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/http/mock_http_cache.h"

#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "net/base/completion_callback.h"
#include "net/base/net_errors.h"
#include "testing/gtest/include/gtest/gtest.h"
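
// The classes declared in mock_http_cache.h and implemented here are
// in-memory test doubles for the HTTP cache: MockDiskEntry and MockDiskCache
// stand in for a disk_cache backend, and MockHttpCache bundles them with a
// MockNetworkLayer for use in HttpCache unit tests.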

namespace net {

namespace {

// We can override the test mode for a given operation by setting this global
// variable.
int g_test_mode = 0;

int GetTestModeForEntry(const std::string& key) {
  // 'key' is prefixed with an identifier if it corresponds to a cached POST.
  // Skip past that to locate the actual URL.
  //
  // TODO(darin): It breaks the abstraction a bit that we assume 'key' is an
  // URL corresponding to a registered MockTransaction. It would be good to
  // have another way to access the test_mode.
  GURL url;
  if (isdigit(key[0])) {
    size_t slash = key.find('/');
    DCHECK(slash != std::string::npos);
    url = GURL(key.substr(slash + 1));
  } else {
    url = GURL(key);
  }
  const MockTransaction* t = FindMockTransaction(url);
  DCHECK(t);
  return t->test_mode;
}

// Forwards |result| to |callback|; used to deliver cache callbacks through
// the message loop.
void CallbackForwarder(const CompletionCallback& callback, int result) {
  callback.Run(result);
}

}  // namespace

//-----------------------------------------------------------------------------

struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  CompletionCallback callback;
  int result;
};
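
// MockDiskEntry keeps each data stream in memory (data_[index]) and, based on
// test_mode_, completes operations either synchronously or through a callback
// posted to the current message loop.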
MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key),
      doomed_(false),
      sparse_(false),
      fail_requests_(false),
      fail_sparse_requests_(false),
      busy_(false),
      delayed_(false),
      cancel_(false) {
  test_mode_ = GetTestModeForEntry(key);
}

void MockDiskEntry::Doom() {
  doomed_ = true;
}

void MockDiskEntry::Close() {
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::FromInternalValue(0);
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::FromInternalValue(0);
}

int32 MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32>(data_[index].size());
}
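
// ReadData copies from data_[index] into |buf|. With TEST_MODE_SYNC_CACHE_READ
// the result is returned directly; otherwise the callback is posted and
// ERR_IO_PENDING is returned, mimicking an asynchronous backend.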
int MockDiskEntry::ReadData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            const CompletionCallback& callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;
  if (static_cast<size_t>(offset) == data_[index].size())
    return 0;

  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  return ERR_IO_PENDING;
}
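
// WriteData resizes the stream to fit and copies |buf| in. A failing entry
// reports ERR_CACHE_READ_FAILURE through the callback rather than returning
// it synchronously, so callers exercise the asynchronous error path.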
int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_) {
    CallbackLater(callback, ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}
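
// The sparse-data methods below emulate a sparse stream using data_[1];
// busy_ and cancel_ let tests simulate overlapping or cancelled sparse IO.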
int MockDiskEntry::ReadSparseData(int64 offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
                     buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  busy_ = true;
  delayed_ = false;
  return ERR_IO_PENDING;
}

int MockDiskEntry::WriteSparseData(int64 offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    if (data_[1].size())
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len)
    data_[1].resize(real_offset + buf_len);

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}
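
// GetAvailableRange scans data_[1] from |offset| for the first contiguous run
// of non-zero bytes, returning its start and length (zero bytes are treated as
// holes in the sparse stream).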
int MockDiskEntry::GetAvailableRange(int64 offset,
                                     int len,
                                     int64* start,
                                     const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(callback, count);
  return ERR_IO_PENDING;
}

bool MockDiskEntry::CouldBeSparse() const {
  if (fail_sparse_requests_)
    return false;
  return sparse_;
}

void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}

int MockDiskEntry::ReadyForSparseIO(const CompletionCallback& callback) {
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass). Just notify the caller that it finished.
  CallbackLater(callback, 0);
  return ERR_IO_PENDING;
}

// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false. Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  if (!value)
    StoreAndDeliverCallbacks(false, NULL, CompletionCallback(), 0);
}
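
// Typical test-side usage (sketch): call IgnoreCallbacks(true) before a batch
// of cache operations, then IgnoreCallbacks(false) to flush every callback
// that was stored in the meantime.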

MockDiskEntry::~MockDiskEntry() {
}

// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry. We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(const CompletionCallback& callback,
                                  int result) {
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, callback, result);
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&MockDiskEntry::RunCallback, this, callback, result));
}

void MockDiskEntry::RunCallback(const CompletionCallback& callback,
                                int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated. What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots). So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(callback, result);
    }
  }
  busy_ = false;
  callback.Run(result);
}

// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
                                             MockDiskEntry* entry,
                                             const CompletionCallback& callback,
                                             int result) {
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    CallbackInfo c = {entry, callback, result};
    callback_list.push_back(c);
  } else {
    for (size_t i = 0; i < callback_list.size(); i++) {
      CallbackInfo& c = callback_list[i];
      c.entry->CallbackLater(c.callback, c.result);
    }
    callback_list.clear();
  }
}

// Statics.
bool MockDiskEntry::ignore_callbacks_ = false;

//-----------------------------------------------------------------------------

MockDiskCache::MockDiskCache()
    : open_count_(0), create_count_(0), fail_requests_(false),
      soft_failures_(false), double_create_check_(true),
      fail_sparse_requests_(false) {
}

MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}

CacheType MockDiskCache::GetCacheType() const {
  return DISK_CACHE;
}

int32 MockDiskCache::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}
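
// Entries are stored in entries_ keyed by cache key and are reference
// counted: the map holds one reference, and OpenEntry/CreateEntry take an
// extra one on behalf of the caller.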
int MockDiskCache::OpenEntry(const std::string& key,
                             disk_cache::Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_OPEN_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return ERR_CACHE_OPEN_FAILURE;

  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return ERR_CACHE_OPEN_FAILURE;
  }

  open_count_++;

  it->second->AddRef();
  *entry = it->second;

  if (soft_failures_)
    it->second->set_fail_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}
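
// CreateEntry silently replaces a doomed entry with the same key; creating
// over a live entry either hits NOTREACHED() (double_create_check_) or
// returns ERR_CACHE_CREATE_FAILURE.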
int MockDiskCache::CreateEntry(const std::string& key,
                               disk_cache::Entry** entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_CREATE_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return ERR_CACHE_CREATE_FAILURE;
    }
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  new_entry->AddRef();
  entries_[key] = new_entry;

  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_)
    new_entry->set_fail_requests();

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::DoomEntry(const std::string& key,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::DoomAllEntries(const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                      const base::Time end_time,
                                      const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                    const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  int OpenNextEntry(disk_cache::Entry** next_entry,
                    const CompletionCallback& callback) override {
    return ERR_NOT_IMPLEMENTED;
  }
};

scoped_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return scoped_ptr<Iterator>(new NotImplementedIterator());
}

void MockDiskCache::GetStats(base::StringPairs* stats) {
}

void MockDiskCache::OnExternalCacheHit(const std::string& key) {
}

void MockDiskCache::ReleaseAll() {
  EntryMap::iterator it = entries_.begin();
  for (; it != entries_.end(); ++it)
    it->second->Release();
  entries_.clear();
}

void MockDiskCache::CallbackLater(const CompletionCallback& callback,
                                  int result) {
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind(&CallbackForwarder, callback, result));
}

//-----------------------------------------------------------------------------

int MockBackendFactory::CreateBackend(NetLog* net_log,
                                      scoped_ptr<disk_cache::Backend>* backend,
                                      const CompletionCallback& callback) {
  backend->reset(new MockDiskCache());
  return OK;
}

//-----------------------------------------------------------------------------
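
// MockHttpCache wraps a real HttpCache wired to a MockNetworkLayer and (by
// default) a MockBackendFactory, so tests exercise the production cache logic
// against fully controllable network and disk layers.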
MockHttpCache::MockHttpCache()
    : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
}

MockHttpCache::MockHttpCache(HttpCache::BackendFactory* disk_cache_factory)
    : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
}

disk_cache::Backend* MockHttpCache::backend() {
  TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == OK) ? backend : NULL;
}

MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}
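
// Minimal usage sketch (assumes a gtest test body; error handling elided):
//
//   MockHttpCache cache;
//   scoped_ptr<HttpTransaction> trans;
//   EXPECT_EQ(OK, cache.CreateTransaction(&trans));
//   // Drive |trans| against a registered MockTransaction...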

int MockHttpCache::CreateTransaction(scoped_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}

void MockHttpCache::BypassCacheLock() {
  http_cache_.BypassLockForTest();
}

void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}
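
// ReadResponseInfo/WriteResponseInfo round-trip an HttpResponseInfo through
// stream 0 of a disk_cache::Entry, the same stream the real HttpCache uses
// for response headers.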
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  scoped_refptr<IOBuffer> buffer(new IOBuffer(size));
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->data(), size, response_info,
                                      response_truncated);
}

bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  scoped_refptr<WrappedIOBuffer> data(
      new WrappedIOBuffer(reinterpret_cast<const char*>(pickle.data())));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}

bool MockHttpCache::OpenBackendEntry(const std::string& key,
                                     disk_cache::Entry** entry) {
  TestCompletionCallback cb;
  int rv = backend()->OpenEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

bool MockHttpCache::CreateBackendEntry(const std::string& key,
                                       disk_cache::Entry** entry,
                                       NetLog* net_log) {
  // |net_log| is unused by the mock backend.
  TestCompletionCallback cb;
  int rv = backend()->CreateEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

// Static.
int MockHttpCache::GetTestMode(int test_mode) {
  if (!g_test_mode)
    return test_mode;

  return g_test_mode;
}

// Static.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}

//-----------------------------------------------------------------------------

int MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                   disk_cache::Entry** entry,
                                   const CompletionCallback& callback) {
  return ERR_IO_PENDING;
}

//-----------------------------------------------------------------------------

int MockBackendNoCbFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  backend->reset(new MockDiskCacheNoCB());
  return OK;
}

//-----------------------------------------------------------------------------
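
// MockBlockingBackendFactory lets a test stall backend creation: CreateBackend
// parks the callback until FinishCreation() is called (or completes at once if
// blocking is disabled).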
MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(NULL),
      block_(true),
      fail_(false) {
}

MockBlockingBackendFactory::~MockBlockingBackendFactory() {
}

int MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  backend_ = backend;
  callback_ = callback;
  return ERR_IO_PENDING;
}

void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    CompletionCallback cb = callback_;
    callback_.Reset();
    cb.Run(Result());  // This object can be deleted here.
  }
}

}  // namespace net