Adds presentation_context and presentation_media_sinks_observer
[chromium-blink-merge.git] / net / http / mock_http_cache.cc
blobdd8abd71bb0dea7ba671237faab75dedf419ac95
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/http/mock_http_cache.h"
7 #include "base/bind.h"
8 #include "base/message_loop/message_loop.h"
9 #include "net/base/completion_callback.h"
10 #include "net/base/net_errors.h"
11 #include "testing/gtest/include/gtest/gtest.h"
13 namespace net {
15 namespace {
// We can override the test mode for a given operation by setting this global
// variable.  0 means "use the mode registered with the MockTransaction";
// see MockHttpCache::SetTestMode() / GetTestMode().
int g_test_mode = 0;
21 int GetTestModeForEntry(const std::string& key) {
22 // 'key' is prefixed with an identifier if it corresponds to a cached POST.
23 // Skip past that to locate the actual URL.
25 // TODO(darin): It breaks the abstraction a bit that we assume 'key' is an
26 // URL corresponding to a registered MockTransaction. It would be good to
27 // have another way to access the test_mode.
28 GURL url;
29 if (isdigit(key[0])) {
30 size_t slash = key.find('/');
31 DCHECK(slash != std::string::npos);
32 url = GURL(key.substr(slash + 1));
33 } else {
34 url = GURL(key);
36 const MockTransaction* t = FindMockTransaction(url);
37 DCHECK(t);
38 return t->test_mode;
// Trampoline that runs |callback| with |result|; bound into posted tasks by
// MockDiskCache::CallbackLater().  NOTE(review): the name has a typo
// ("Forwader" -> "Forwarder"); it is file-local, so renaming is safe but must
// be done together with the Bind() call site below.
void CallbackForwader(const CompletionCallback& callback, int result) {
  callback.Run(result);
}
45 } // namespace
47 //-----------------------------------------------------------------------------
// A deferred completion callback together with the entry it belongs to and
// the result to deliver.  Queued by StoreAndDeliverCallbacks() while
// IgnoreCallbacks(true) is in effect.
struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;  // Keeps the entry alive until delivery.
  CompletionCallback callback;
  int result;
};
MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), doomed_(false), sparse_(false),
      fail_requests_(false), fail_sparse_requests_(false), busy_(false),
      delayed_(false) {
  // Sync/async behavior of this entry is driven by the MockTransaction
  // registered for |key|.
  test_mode_ = GetTestModeForEntry(key);
}
void MockDiskEntry::Doom() {
  doomed_ = true;
}

void MockDiskEntry::Close() {
  // Entries are reference counted; closing drops the cache user's reference.
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

base::Time MockDiskEntry::GetLastUsed() const {
  // Timestamps are not simulated; always report the zero time.
  return base::Time::FromInternalValue(0);
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::FromInternalValue(0);
}

int32 MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32>(data_[index].size());
}
// Reads up to |buf_len| bytes of stream |index| starting at |offset| into
// |buf|.  Returns the byte count when completing synchronously, 0 at EOF,
// a net error on failure, or ERR_IO_PENDING with |callback| posted.
int MockDiskEntry::ReadData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            const CompletionCallback& callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;
  if (static_cast<size_t>(offset) == data_[index].size())
    return 0;  // Read exactly at EOF.

  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  // Complete synchronously or via a posted callback, per the test mode.
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  return ERR_IO_PENDING;
}
// Writes |buf_len| bytes into stream |index| at |offset|, resizing the
// stream to end exactly at offset + buf_len (only truncating writes are
// supported, per the DCHECK).
int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_) {
    // NOTE(review): reuses ERR_CACHE_READ_FAILURE for a write failure;
    // presumably intentional for these tests -- confirm before changing.
    CallbackLater(callback, ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}
142 int MockDiskEntry::ReadSparseData(int64 offset,
143 IOBuffer* buf,
144 int buf_len,
145 const CompletionCallback& callback) {
146 DCHECK(!callback.is_null());
147 if (fail_sparse_requests_)
148 return ERR_NOT_IMPLEMENTED;
149 if (!sparse_ || busy_)
150 return ERR_CACHE_OPERATION_NOT_SUPPORTED;
151 if (offset < 0)
152 return ERR_FAILED;
154 if (fail_requests_)
155 return ERR_CACHE_READ_FAILURE;
157 DCHECK(offset < kint32max);
158 int real_offset = static_cast<int>(offset);
159 if (!buf_len)
160 return 0;
162 int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
163 buf_len);
164 memcpy(buf->data(), &data_[1][real_offset], num);
166 if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
167 return num;
169 CallbackLater(callback, num);
170 busy_ = true;
171 delayed_ = false;
172 return ERR_IO_PENDING;
// Writes |buf_len| bytes into the sparse stream (data_[1]) at |offset|,
// growing the stream as needed.  The first sparse write flips the entry
// into sparse mode, which is rejected if stream 1 already has plain data.
int MockDiskEntry::WriteSparseData(int64 offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (busy_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    if (data_[1].size())
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len)
    data_[1].resize(real_offset + buf_len);

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}
// Scans data_[1] from |offset| for the first contiguous run of nonzero
// bytes (at most |len| bytes long).  Returns the run's length and writes
// its starting offset to |start|.  Zero bytes delimit "holes" in the
// simulated sparse entry.
int MockDiskEntry::GetAvailableRange(int64 offset,
                                     int len,
                                     int64* start,
                                     const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      // Still looking for the start of the run.
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      // Inside the run; a zero byte ends it.
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  // NOTE(review): checks the WRITE sync flag even though this is a read-side
  // operation -- looks deliberate for these tests, but confirm upstream.
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(callback, count);
  return ERR_IO_PENDING;
}
251 bool MockDiskEntry::CouldBeSparse() const {
252 if (fail_sparse_requests_)
253 return false;
254 return sparse_;
void MockDiskEntry::CancelSparseIO() {
  // |cancel_| is a class-wide static; ReadyForSparseIO() consumes it.
  cancel_ = true;
}
// Returns OK when no sparse cancellation is pending; otherwise consumes the
// pending cancel and completes (sync or async, per test mode).
int MockDiskEntry::ReadyForSparseIO(const CompletionCallback& callback) {
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass).  Just notify the caller that it finished.
  CallbackLater(callback, 0);
  return ERR_IO_PENDING;
}
// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false.  Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  // Transitioning back to "deliver" flushes everything queued while ignoring.
  if (!value)
    StoreAndDeliverCallbacks(false, NULL, CompletionCallback(), 0);
}
// Private: lifetime is managed by reference counting (see Close()).
MockDiskEntry::~MockDiskEntry() {}
293 // Unlike the callbacks for MockHttpTransaction, we want this one to run even
294 // if the consumer called Close on the MockDiskEntry. We achieve that by
295 // leveraging the fact that this class is reference counted.
296 void MockDiskEntry::CallbackLater(const CompletionCallback& callback,
297 int result) {
298 if (ignore_callbacks_)
299 return StoreAndDeliverCallbacks(true, this, callback, result);
300 base::MessageLoop::current()->PostTask(
301 FROM_HERE,
302 base::Bind(&MockDiskEntry::RunCallback, this, callback, result));
// Delivers a posted callback; while a sparse operation is in flight (busy_),
// the first delivery attempt is re-posted once to add an extra message-loop
// trip before completion.
void MockDiskEntry::RunCallback(const CompletionCallback& callback,
                                int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated.  What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots).  So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(callback, result);
    }
  }
  busy_ = false;
  callback.Run(result);
}
325 // When |store| is true, stores the callback to be delivered later; otherwise
326 // delivers any callback previously stored.
327 // Static.
328 void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
329 MockDiskEntry* entry,
330 const CompletionCallback& callback,
331 int result) {
332 static std::vector<CallbackInfo> callback_list;
333 if (store) {
334 CallbackInfo c = {entry, callback, result};
335 callback_list.push_back(c);
336 } else {
337 for (size_t i = 0; i < callback_list.size(); i++) {
338 CallbackInfo& c = callback_list[i];
339 c.entry->CallbackLater(c.callback, c.result);
341 callback_list.clear();
// Statics: shared cancellation and callback-suppression flags for all
// MockDiskEntry instances.
bool MockDiskEntry::cancel_ = false;
bool MockDiskEntry::ignore_callbacks_ = false;
349 //-----------------------------------------------------------------------------
MockDiskCache::MockDiskCache()
    : open_count_(0), create_count_(0), fail_requests_(false),
      soft_failures_(false), double_create_check_(true),
      fail_sparse_requests_(false) {
}

MockDiskCache::~MockDiskCache() {
  // Drop the cache's reference on every entry still in the map.
  ReleaseAll();
}
CacheType MockDiskCache::GetCacheType() const {
  return DISK_CACHE;
}

int32 MockDiskCache::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}
// Looks up |key| in the entry map.  Doomed entries are purged and reported
// as a miss.  On a hit, hands the caller an AddRef'd pointer and completes
// synchronously or via |callback| per the key's test mode.
int MockDiskCache::OpenEntry(const std::string& key,
                             disk_cache::Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_OPEN_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return ERR_CACHE_OPEN_FAILURE;

  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return ERR_CACHE_OPEN_FAILURE;
  }

  open_count_++;

  // Reference for the caller; balanced by the caller's Close().
  it->second->AddRef();
  *entry = it->second;

  if (soft_failures_)
    it->second->set_fail_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}
// Creates a new entry for |key|.  Creating over a live (non-doomed) entry is
// a test bug (NOTREACHED) unless double_create_check_ is disabled, in which
// case it fails like a real backend would.
int MockDiskCache::CreateEntry(const std::string& key,
                               disk_cache::Entry** entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_CREATE_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return ERR_CACHE_CREATE_FAILURE;
    }
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  // One reference held by the map...
  new_entry->AddRef();
  entries_[key] = new_entry;

  // ...and one for the caller, balanced by the caller's Close().
  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_)
    new_entry->set_fail_requests();

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}
// Removes |key| from the map (dropping the map's reference).  Succeeds even
// if the entry does not exist, matching real backend semantics.
int MockDiskCache::DoomEntry(const std::string& key,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}
// Bulk-doom operations are not needed by the tests that use this mock.
int MockDiskCache::DoomAllEntries(const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                      const base::Time end_time,
                                      const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                    const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}
// Iteration is not supported by this mock; every OpenNextEntry() call fails.
class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  int OpenNextEntry(disk_cache::Entry** next_entry,
                    const CompletionCallback& callback) override {
    return ERR_NOT_IMPLEMENTED;
  }
};

scoped_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return scoped_ptr<Iterator>(new NotImplementedIterator());
}
// Stats and external-hit notifications are intentionally no-ops.
void MockDiskCache::GetStats(base::StringPairs* stats) {
}

void MockDiskCache::OnExternalCacheHit(const std::string& key) {
}

void MockDiskCache::ReleaseAll() {
  // Drop the map's reference on each entry, then empty the map.
  EntryMap::iterator it = entries_.begin();
  for (; it != entries_.end(); ++it)
    it->second->Release();
  entries_.clear();
}
// Posts |callback| with |result| to the current message loop.
void MockDiskCache::CallbackLater(const CompletionCallback& callback,
                                  int result) {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&CallbackForwader, callback, result));
}
505 //-----------------------------------------------------------------------------
// Synchronously hands out a fresh MockDiskCache; |callback| is never run.
int MockBackendFactory::CreateBackend(NetLog* net_log,
                                      scoped_ptr<disk_cache::Backend>* backend,
                                      const CompletionCallback& callback) {
  backend->reset(new MockDiskCache());
  return OK;
}
514 //-----------------------------------------------------------------------------
MockHttpCache::MockHttpCache()
    : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
}

// Allows tests to inject a custom backend factory (e.g. a blocking one).
MockHttpCache::MockHttpCache(HttpCache::BackendFactory* disk_cache_factory)
    : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
}
// Synchronously resolves the cache's backend (spinning the loop if backend
// creation is pending); NULL on failure.
disk_cache::Backend* MockHttpCache::backend() {
  TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == OK) ? backend : NULL;
}

MockDiskCache* MockHttpCache::disk_cache() {
  // Safe downcast: this cache is always built with a MockDiskCache backend.
  return static_cast<MockDiskCache*>(backend());
}
int MockHttpCache::CreateTransaction(scoped_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}

// Test-only escape hatch around the entry lock (see HttpCache).
void MockHttpCache::BypassCacheLock() {
  http_cache_.BypassLockForTest();
}

// Forces conditionalization attempts to fail (see HttpCache).
void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}
// Reads and parses the HttpResponseInfo stored in stream 0 of |disk_entry|.
// Returns false if parsing fails; EXPECTs that the full stream was read.
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  scoped_refptr<IOBuffer> buffer(new IOBuffer(size));
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->data(), size, response_info,
                                      response_truncated);
}
// Serializes |response_info| with Pickle and writes it into stream 0 of
// |disk_entry|.  Returns true iff the full pickle was written.
bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  // WrappedIOBuffer does not own the data; |pickle| outlives the write below.
  scoped_refptr<WrappedIOBuffer> data(
      new WrappedIOBuffer(reinterpret_cast<const char*>(pickle.data())));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}
// Synchronous convenience wrappers over the backend's async entry API.
bool MockHttpCache::OpenBackendEntry(const std::string& key,
                                     disk_cache::Entry** entry) {
  TestCompletionCallback cb;
  int rv = backend()->OpenEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

bool MockHttpCache::CreateBackendEntry(const std::string& key,
                                       disk_cache::Entry** entry,
                                       NetLog* net_log) {
  TestCompletionCallback cb;
  int rv = backend()->CreateEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}
596 // Static.
597 int MockHttpCache::GetTestMode(int test_mode) {
598 if (!g_test_mode)
599 return test_mode;
601 return g_test_mode;
604 // Static.
605 void MockHttpCache::SetTestMode(int test_mode) {
606 g_test_mode = test_mode;
609 //-----------------------------------------------------------------------------
// Simulates a backend whose CreateEntry never completes: reports pending
// and never runs |callback|.
int MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                   disk_cache::Entry** entry,
                                   const CompletionCallback& callback) {
  return ERR_IO_PENDING;
}
617 //-----------------------------------------------------------------------------
// Synchronously produces a MockDiskCacheNoCB (whose entry creation hangs).
int MockBackendNoCbFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  backend->reset(new MockDiskCacheNoCB());
  return OK;
}
627 //-----------------------------------------------------------------------------
// Starts in the blocking state; call FinishCreation() to unblock.
MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(NULL),
      block_(true),
      fail_(false) {
}

MockBlockingBackendFactory::~MockBlockingBackendFactory() {
}
// While blocking, parks |backend| and |callback| until FinishCreation();
// otherwise completes immediately with Result() (OK or failure per |fail_|).
int MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  backend_ = backend;
  callback_ = callback;
  return ERR_IO_PENDING;
}
// Unblocks a pending CreateBackend(): fills in the parked backend slot
// (unless failing) and runs the parked callback.
void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    // Copy before Reset(): running the callback may delete |this|.
    CompletionCallback cb = callback_;
    callback_.Reset();
    cb.Run(Result());  // This object can be deleted here.
  }
}
664 } // namespace net