net/http/mock_http_cache.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/http/mock_http_cache.h"

#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "net/base/completion_callback.h"
#include "net/base/net_errors.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace net {

namespace {

// We can override the test mode for a given operation by setting this global
// variable.
int g_test_mode = 0;
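
// Example (illustrative only): a test can force every cache operation in the
// remainder of the test to complete synchronously by overriding the
// per-transaction mode, e.g.:
//
//   MockHttpCache::SetTestMode(TEST_MODE_SYNC_CACHE_START |
//                              TEST_MODE_SYNC_CACHE_READ |
//                              TEST_MODE_SYNC_CACHE_WRITE);
//   ...
//   MockHttpCache::SetTestMode(0);  // Restore the per-transaction modes.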

int GetTestModeForEntry(const std::string& key) {
  // 'key' is prefixed with an identifier if it corresponds to a cached POST.
  // Skip past that to locate the actual URL.
  //
  // TODO(darin): It breaks the abstraction a bit that we assume 'key' is a
  // URL corresponding to a registered MockTransaction. It would be good to
  // have another way to access the test_mode.
  GURL url;
  if (isdigit(key[0])) {
    size_t slash = key.find('/');
    DCHECK(slash != std::string::npos);
    url = GURL(key.substr(slash + 1));
  } else {
    url = GURL(key);
  }
  const MockTransaction* t = FindMockTransaction(url);
  DCHECK(t);
  return t->test_mode;
}
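
// Example (illustrative sketch): the lookup above only succeeds if the test
// registered a MockTransaction for the URL beforehand, e.g. with the helpers
// declared alongside FindMockTransaction():
//
//   MockTransaction transaction(kSimpleGET_Transaction);
//   transaction.test_mode = TEST_MODE_SYNC_CACHE_START;
//   AddMockTransaction(&transaction);
//   ...run the request through the cache...
//   RemoveMockTransaction(&transaction);
//
// kSimpleGET_Transaction, AddMockTransaction() and RemoveMockTransaction()
// are assumed to come from the shared HTTP transaction test utilities.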

void CallbackForwarder(const CompletionCallback& callback, int result) {
  callback.Run(result);
}

}  // namespace

//-----------------------------------------------------------------------------

struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  CompletionCallback callback;
  int result;
};

MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), doomed_(false), sparse_(false),
      fail_requests_(false), fail_sparse_requests_(false), busy_(false),
      delayed_(false) {
  test_mode_ = GetTestModeForEntry(key);
}

void MockDiskEntry::Doom() {
  doomed_ = true;
}

void MockDiskEntry::Close() {
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::FromInternalValue(0);
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::FromInternalValue(0);
}

int32 MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32>(data_[index].size());
}

int MockDiskEntry::ReadData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            const CompletionCallback& callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;
  if (static_cast<size_t>(offset) == data_[index].size())
    return 0;

  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  return ERR_IO_PENDING;
}

int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_) {
    CallbackLater(callback, ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}

int MockDiskEntry::ReadSparseData(int64 offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
                     buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  busy_ = true;
  delayed_ = false;
  return ERR_IO_PENDING;
}

int MockDiskEntry::WriteSparseData(int64 offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (busy_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    if (data_[1].size())
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len)
    data_[1].resize(real_offset + buf_len);

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}

int MockDiskEntry::GetAvailableRange(int64 offset,
                                     int len,
                                     int64* start,
                                     const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(callback, count);
  return ERR_IO_PENDING;
}

bool MockDiskEntry::CouldBeSparse() const {
  if (fail_sparse_requests_)
    return false;
  return sparse_;
}

void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}

int MockDiskEntry::ReadyForSparseIO(const CompletionCallback& callback) {
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass). Just notify the caller that it finished.
  CallbackLater(callback, 0);
  return ERR_IO_PENDING;
}

// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false. Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  if (!value)
    StoreAndDeliverCallbacks(false, NULL, CompletionCallback(), 0);
}
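
// Example (illustrative only): a test that wants cache completions held back
// while it performs other work can bracket that work with:
//
//   MockDiskEntry::IgnoreCallbacks(true);
//   ...issue cache operations; their callbacks are queued, not delivered...
//   MockDiskEntry::IgnoreCallbacks(false);  // Queued callbacks are posted now.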

MockDiskEntry::~MockDiskEntry() {
}

// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry. We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(const CompletionCallback& callback,
                                  int result) {
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, callback, result);
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&MockDiskEntry::RunCallback, this, callback, result));
}

void MockDiskEntry::RunCallback(const CompletionCallback& callback,
                                int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated. What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n ms (which may cause trouble with slow bots). So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(callback, result);
    }
  }
  busy_ = false;
  callback.Run(result);
}

// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
                                             MockDiskEntry* entry,
                                             const CompletionCallback& callback,
                                             int result) {
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    CallbackInfo c = {entry, callback, result};
    callback_list.push_back(c);
  } else {
    for (size_t i = 0; i < callback_list.size(); i++) {
      CallbackInfo& c = callback_list[i];
      c.entry->CallbackLater(c.callback, c.result);
    }
    callback_list.clear();
  }
}

// Statics.
bool MockDiskEntry::cancel_ = false;
bool MockDiskEntry::ignore_callbacks_ = false;

//-----------------------------------------------------------------------------

MockDiskCache::MockDiskCache()
    : open_count_(0), create_count_(0), fail_requests_(false),
      soft_failures_(false), double_create_check_(true),
      fail_sparse_requests_(false) {
}

MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}

CacheType MockDiskCache::GetCacheType() const {
  return DISK_CACHE;
}

int32 MockDiskCache::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}

int MockDiskCache::OpenEntry(const std::string& key,
                             disk_cache::Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_OPEN_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return ERR_CACHE_OPEN_FAILURE;

  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return ERR_CACHE_OPEN_FAILURE;
  }

  open_count_++;

  it->second->AddRef();
  *entry = it->second;

  if (soft_failures_)
    it->second->set_fail_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::CreateEntry(const std::string& key,
                               disk_cache::Entry** entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_CREATE_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return ERR_CACHE_CREATE_FAILURE;
    }
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  new_entry->AddRef();
  entries_[key] = new_entry;

  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_)
    new_entry->set_fail_requests();

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::DoomEntry(const std::string& key,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::DoomAllEntries(const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                      const base::Time end_time,
                                      const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                    const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  int OpenNextEntry(disk_cache::Entry** next_entry,
                    const CompletionCallback& callback) override {
    return ERR_NOT_IMPLEMENTED;
  }
};

scoped_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return scoped_ptr<Iterator>(new NotImplementedIterator());
}

void MockDiskCache::GetStats(base::StringPairs* stats) {
}

void MockDiskCache::OnExternalCacheHit(const std::string& key) {
}

void MockDiskCache::ReleaseAll() {
  EntryMap::iterator it = entries_.begin();
  for (; it != entries_.end(); ++it)
    it->second->Release();
  entries_.clear();
}

void MockDiskCache::CallbackLater(const CompletionCallback& callback,
                                  int result) {
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind(&CallbackForwarder, callback, result));
}

//-----------------------------------------------------------------------------

int MockBackendFactory::CreateBackend(NetLog* net_log,
                                      scoped_ptr<disk_cache::Backend>* backend,
                                      const CompletionCallback& callback) {
  backend->reset(new MockDiskCache());
  return OK;
}

//-----------------------------------------------------------------------------

MockHttpCache::MockHttpCache()
    : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
}

MockHttpCache::MockHttpCache(HttpCache::BackendFactory* disk_cache_factory)
    : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
}

disk_cache::Backend* MockHttpCache::backend() {
  TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == OK) ? backend : NULL;
}

MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}

int MockHttpCache::CreateTransaction(scoped_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}
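
// Example (illustrative sketch) of how unit tests typically drive this class;
// MockHttpRequest and the transaction Start() signature are assumed to match
// the shared HTTP transaction test utilities:
//
//   MockHttpCache cache;
//   scoped_ptr<HttpTransaction> trans;
//   ASSERT_EQ(OK, cache.CreateTransaction(&trans));
//
//   MockHttpRequest request(kSimpleGET_Transaction);
//   TestCompletionCallback cb;
//   int rv = trans->Start(&request, cb.callback(), BoundNetLog());
//   EXPECT_EQ(OK, cb.GetResult(rv));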

void MockHttpCache::BypassCacheLock() {
  http_cache_.BypassLockForTest();
}

void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}

bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  scoped_refptr<IOBuffer> buffer(new IOBuffer(size));
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->data(), size, response_info,
                                      response_truncated);
}

bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  base::Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  scoped_refptr<WrappedIOBuffer> data(
      new WrappedIOBuffer(reinterpret_cast<const char*>(pickle.data())));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}
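
// Example (illustrative only): the two helpers above are meant to round-trip
// response headers through stream 0 of a backend entry. Here |cache| is a
// MockHttpCache, |response_info| and |read_back| are HttpResponseInfo objects
// owned by the test, and |url| is assumed to be registered as a
// MockTransaction URL (names are hypothetical):
//
//   disk_cache::Entry* entry = NULL;
//   ASSERT_TRUE(cache.CreateBackendEntry(url, &entry, NULL));
//   ASSERT_TRUE(cache.WriteResponseInfo(entry, &response_info, true, false));
//   bool truncated = false;
//   EXPECT_TRUE(cache.ReadResponseInfo(entry, &read_back, &truncated));
//   entry->Close();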

bool MockHttpCache::OpenBackendEntry(const std::string& key,
                                     disk_cache::Entry** entry) {
  TestCompletionCallback cb;
  int rv = backend()->OpenEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

bool MockHttpCache::CreateBackendEntry(const std::string& key,
                                       disk_cache::Entry** entry,
                                       NetLog* net_log) {
  TestCompletionCallback cb;
  int rv = backend()->CreateEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

// Static.
int MockHttpCache::GetTestMode(int test_mode) {
  if (!g_test_mode)
    return test_mode;

  return g_test_mode;
}

// Static.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}

//-----------------------------------------------------------------------------

int MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                   disk_cache::Entry** entry,
                                   const CompletionCallback& callback) {
  return ERR_IO_PENDING;
}

//-----------------------------------------------------------------------------

int MockBackendNoCbFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  backend->reset(new MockDiskCacheNoCB());
  return OK;
}

//-----------------------------------------------------------------------------

MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(NULL),
      block_(true),
      fail_(false) {
}

MockBlockingBackendFactory::~MockBlockingBackendFactory() {
}

int MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  backend_ = backend;
  callback_ = callback;
  return ERR_IO_PENDING;
}

void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    CompletionCallback cb = callback_;
    callback_.Reset();
    cb.Run(Result());  // This object can be deleted here.
  }
}
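
// Example (illustrative only): tests use this factory to hold backend
// creation in a pending state and release it at a chosen moment. Ownership of
// |factory| is assumed to pass to the cache:
//
//   MockBlockingBackendFactory* factory = new MockBlockingBackendFactory();
//   MockHttpCache cache(factory);
//   ...start transactions; they queue up behind the pending backend...
//   factory->FinishCreation();  // Unblocks the waiting transactions.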

}  // namespace net