net/http/mock_http_cache.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/http/mock_http_cache.h"

#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "net/base/completion_callback.h"
#include "net/base/net_errors.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace net {

namespace {

// We can override the test mode for a given operation by setting this global
// variable.
int g_test_mode = 0;
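
// For example, a test can force the cache read/write paths in this file to
// complete synchronously, regardless of the per-transaction test mode, by
// installing a global override (a sketch; the TEST_MODE_* flags are defined
// in mock_http_cache.h):
//
//   MockHttpCache::SetTestMode(TEST_MODE_SYNC_CACHE_READ |
//                              TEST_MODE_SYNC_CACHE_WRITE);
//   ...  // run the test body
//   MockHttpCache::SetTestMode(0);  // back to per-transaction test modes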

int GetTestModeForEntry(const std::string& key) {
  // 'key' is prefixed with an identifier if it corresponds to a cached POST.
  // Skip past that to locate the actual URL.
  //
  // TODO(darin): It breaks the abstraction a bit that we assume 'key' is a
  // URL corresponding to a registered MockTransaction. It would be good to
  // have another way to access the test_mode.
  GURL url;
  if (isdigit(key[0])) {
    size_t slash = key.find('/');
    DCHECK(slash != std::string::npos);
    url = GURL(key.substr(slash + 1));
  } else {
    url = GURL(key);
  }
  const MockTransaction* t = FindMockTransaction(url);
  DCHECK(t);
  return t->test_mode;
}

// Forwards |result| to |callback|; used when posting completion callbacks.
void CallbackForwarder(const CompletionCallback& callback, int result) {
  callback.Run(result);
}

}  // namespace

//-----------------------------------------------------------------------------

struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  CompletionCallback callback;
  int result;
};

MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key),
      doomed_(false),
      sparse_(false),
      fail_requests_(false),
      fail_sparse_requests_(false),
      busy_(false),
      delayed_(false),
      cancel_(false) {
  test_mode_ = GetTestModeForEntry(key);
}

void MockDiskEntry::Doom() {
  doomed_ = true;
}

void MockDiskEntry::Close() {
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::FromInternalValue(0);
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::FromInternalValue(0);
}

int32 MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32>(data_[index].size());
}
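
// The Read/Write methods below mirror the disk_cache::Entry contract: they
// either complete synchronously (returning a byte count or error code) or
// return ERR_IO_PENDING and post |callback| via CallbackLater(), depending on
// the TEST_MODE_SYNC_CACHE_* bits reported by
// MockHttpCache::GetTestMode(test_mode_).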

int MockDiskEntry::ReadData(int index,
                            int offset,
                            IOBuffer* buf,
                            int buf_len,
                            const CompletionCallback& callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;
  if (static_cast<size_t>(offset) == data_[index].size())
    return 0;

  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  return ERR_IO_PENDING;
}

int MockDiskEntry::WriteData(int index,
                             int offset,
                             IOBuffer* buf,
                             int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_) {
    CallbackLater(callback, ERR_CACHE_READ_FAILURE);
    return ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}
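
// Sparse I/O is emulated on top of data stream 1 (data_[1]): the sparse
// methods below treat that vector as a flat buffer indexed by |offset|, so
// "holes" are simply runs of zero bytes.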

int MockDiskEntry::ReadSparseData(int64 offset,
                                  IOBuffer* buf,
                                  int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
                     buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  busy_ = true;
  delayed_ = false;
  return ERR_IO_PENDING;
}

int MockDiskEntry::WriteSparseData(int64 offset,
                                   IOBuffer* buf,
                                   int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    if (data_[1].size())
      return ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len)
    data_[1].resize(real_offset + buf_len);

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return ERR_IO_PENDING;
}
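
// GetAvailableRange() scans data_[1] byte by byte: it reports the first run
// of contiguous nonzero bytes at or after |offset| (a zero byte counts as a
// hole and ends the run).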

int MockDiskEntry::GetAvailableRange(int64 offset,
                                     int len,
                                     int64* start,
                                     const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_ || cancel_)
    return ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return ERR_FAILED;

  if (fail_requests_)
    return ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(callback, count);
  return ERR_IO_PENDING;
}

bool MockDiskEntry::CouldBeSparse() const {
  if (fail_sparse_requests_)
    return false;
  return sparse_;
}

void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}

int MockDiskEntry::ReadyForSparseIO(const CompletionCallback& callback) {
  if (fail_sparse_requests_)
    return ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass). Just notify the caller that it finished.
  CallbackLater(callback, 0);
  return ERR_IO_PENDING;
}

// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false. Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  if (!value)
    StoreAndDeliverCallbacks(false, NULL, CompletionCallback(), 0);
}
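
// Typical use is to bracket a cache operation (illustrative sketch only):
//
//   MockDiskEntry::IgnoreCallbacks(true);
//   ...  // start async cache operations; their completions are queued
//   MockDiskEntry::IgnoreCallbacks(false);  // queued callbacks are re-posted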

MockDiskEntry::~MockDiskEntry() {
}

// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry. We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(const CompletionCallback& callback,
                                  int result) {
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, callback, result);
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&MockDiskEntry::RunCallback, this, callback, result));
}

void MockDiskEntry::RunCallback(const CompletionCallback& callback,
                                int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated. What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n ms (which may cause trouble with slow bots). So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(callback, result);
    }
  }
  busy_ = false;
  callback.Run(result);
}

// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(bool store,
                                             MockDiskEntry* entry,
                                             const CompletionCallback& callback,
                                             int result) {
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    CallbackInfo c = {entry, callback, result};
    callback_list.push_back(c);
  } else {
    for (size_t i = 0; i < callback_list.size(); i++) {
      CallbackInfo& c = callback_list[i];
      c.entry->CallbackLater(c.callback, c.result);
    }
    callback_list.clear();
  }
}

// Statics.
bool MockDiskEntry::ignore_callbacks_ = false;

//-----------------------------------------------------------------------------

MockDiskCache::MockDiskCache()
    : open_count_(0), create_count_(0), fail_requests_(false),
      soft_failures_(false), double_create_check_(true),
      fail_sparse_requests_(false) {
}

MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}

CacheType MockDiskCache::GetCacheType() const {
  return DISK_CACHE;
}

int32 MockDiskCache::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}

int MockDiskCache::OpenEntry(const std::string& key,
                             disk_cache::Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_OPEN_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return ERR_CACHE_OPEN_FAILURE;

  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return ERR_CACHE_OPEN_FAILURE;
  }

  open_count_++;

  it->second->AddRef();
  *entry = it->second;

  if (soft_failures_)
    it->second->set_fail_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}
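
// Note: by default, creating an entry that already exists (and has not been
// doomed) trips NOTREACHED(); tests that expect that race can disable the
// |double_create_check_| flag so CreateEntry() returns
// ERR_CACHE_CREATE_FAILURE instead.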

int MockDiskCache::CreateEntry(const std::string& key,
                               disk_cache::Entry** entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return ERR_CACHE_CREATE_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return ERR_CACHE_CREATE_FAILURE;
    }
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  new_entry->AddRef();
  entries_[key] = new_entry;

  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_)
    new_entry->set_fail_requests();

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::DoomEntry(const std::string& key,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return OK;

  CallbackLater(callback, OK);
  return ERR_IO_PENDING;
}

int MockDiskCache::DoomAllEntries(const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                      const base::Time end_time,
                                      const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                    const CompletionCallback& callback) {
  return ERR_NOT_IMPLEMENTED;
}

class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  int OpenNextEntry(disk_cache::Entry** next_entry,
                    const CompletionCallback& callback) override {
    return ERR_NOT_IMPLEMENTED;
  }
};

scoped_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return scoped_ptr<Iterator>(new NotImplementedIterator());
}

void MockDiskCache::GetStats(base::StringPairs* stats) {
}

void MockDiskCache::OnExternalCacheHit(const std::string& key) {
}

void MockDiskCache::ReleaseAll() {
  EntryMap::iterator it = entries_.begin();
  for (; it != entries_.end(); ++it)
    it->second->Release();
  entries_.clear();
}

void MockDiskCache::CallbackLater(const CompletionCallback& callback,
                                  int result) {
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind(&CallbackForwarder, callback, result));
}

//-----------------------------------------------------------------------------

int MockBackendFactory::CreateBackend(NetLog* net_log,
                                      scoped_ptr<disk_cache::Backend>* backend,
                                      const CompletionCallback& callback) {
  backend->reset(new MockDiskCache());
  return OK;
}

//-----------------------------------------------------------------------------

MockHttpCache::MockHttpCache()
    : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
}

MockHttpCache::MockHttpCache(HttpCache::BackendFactory* disk_cache_factory)
    : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
}
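
// A minimal usage sketch (illustrative only; |trans| is just a local in the
// test):
//
//   MockHttpCache cache;
//   scoped_ptr<HttpTransaction> trans;
//   int rv = cache.CreateTransaction(&trans);
//   // rv is OK on success; |trans| is then backed by MockNetworkLayer and
//   // the MockDiskCache created by MockBackendFactory.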

disk_cache::Backend* MockHttpCache::backend() {
  TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == OK) ? backend : NULL;
}

MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}

int MockHttpCache::CreateTransaction(scoped_ptr<HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(DEFAULT_PRIORITY, trans);
}

void MockHttpCache::BypassCacheLock() {
  http_cache_.BypassLockForTest();
}

void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}
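
// The helpers below (de)serialize HttpResponseInfo through data stream 0 of a
// disk cache entry, matching the index HttpCache itself uses for response
// metadata.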

bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  TestCompletionCallback cb;
  scoped_refptr<IOBuffer> buffer(new IOBuffer(size));
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return HttpCache::ParseResponseInfo(buffer->data(), size, response_info,
                                      response_truncated);
}

bool MockHttpCache::WriteResponseInfo(disk_cache::Entry* disk_entry,
                                      const HttpResponseInfo* response_info,
                                      bool skip_transient_headers,
                                      bool response_truncated) {
  base::Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  TestCompletionCallback cb;
  scoped_refptr<WrappedIOBuffer> data(
      new WrappedIOBuffer(reinterpret_cast<const char*>(pickle.data())));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}

bool MockHttpCache::OpenBackendEntry(const std::string& key,
                                     disk_cache::Entry** entry) {
  TestCompletionCallback cb;
  int rv = backend()->OpenEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

bool MockHttpCache::CreateBackendEntry(const std::string& key,
                                       disk_cache::Entry** entry,
                                       NetLog* net_log) {
  TestCompletionCallback cb;
  int rv = backend()->CreateEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == OK);
}

// Static.
int MockHttpCache::GetTestMode(int test_mode) {
  if (!g_test_mode)
    return test_mode;

  return g_test_mode;
}

// Static.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}

//-----------------------------------------------------------------------------

int MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                   disk_cache::Entry** entry,
                                   const CompletionCallback& callback) {
  return ERR_IO_PENDING;
}
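
// MockDiskCacheNoCB::CreateEntry() returns ERR_IO_PENDING without ever running
// |callback|, presumably so tests can exercise code paths where entry creation
// never completes.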

//-----------------------------------------------------------------------------

int MockBackendNoCbFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  backend->reset(new MockDiskCacheNoCB());
  return OK;
}

//-----------------------------------------------------------------------------

MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(NULL),
      block_(true),
      fail_(false) {
}

MockBlockingBackendFactory::~MockBlockingBackendFactory() {
}

int MockBlockingBackendFactory::CreateBackend(
    NetLog* net_log,
    scoped_ptr<disk_cache::Backend>* backend,
    const CompletionCallback& callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  backend_ = backend;
  callback_ = callback;
  return ERR_IO_PENDING;
}
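
// FinishCreation() unblocks a CreateBackend() call that returned
// ERR_IO_PENDING: it creates the backend (unless |fail_| is set) and then runs
// the stored callback. Note the callback may delete this factory.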

void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    CompletionCallback cb = callback_;
    callback_.Reset();
    cb.Run(Result());  // This object can be deleted here.
  }
}

}  // namespace net