// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/cache_storage/cache_storage_cache.h"

#include <string>

#include "base/barrier_closure.h"
#include "base/files/file_path.h"
#include "base/guid.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "content/browser/cache_storage/cache_storage.pb.h"
#include "content/browser/cache_storage/cache_storage_blob_to_disk_cache.h"
#include "content/browser/cache_storage/cache_storage_scheduler.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/common/referrer.h"
#include "net/base/completion_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/disk_cache.h"
#include "net/url_request/url_request_context_getter.h"
#include "storage/browser/blob/blob_data_builder.h"
#include "storage/browser/blob/blob_data_handle.h"
#include "storage/browser/blob/blob_storage_context.h"
#include "storage/browser/blob/blob_url_request_job_factory.h"
#include "storage/browser/quota/quota_manager_proxy.h"
#include "third_party/WebKit/public/platform/WebServiceWorkerResponseType.h"

namespace content {

namespace {

// This class ensures that the cache and the entry have a lifetime as long as
// the blob that is created to contain them.
class CacheStorageCacheDataHandle
    : public storage::BlobDataBuilder::DataHandle {
 public:
  CacheStorageCacheDataHandle(const scoped_refptr<CacheStorageCache>& cache,
                              disk_cache::ScopedEntryPtr entry)
      : cache_(cache), entry_(entry.Pass()) {}

 private:
  ~CacheStorageCacheDataHandle() override {}

  scoped_refptr<CacheStorageCache> cache_;
  disk_cache::ScopedEntryPtr entry_;

  DISALLOW_COPY_AND_ASSIGN(CacheStorageCacheDataHandle);
};

typedef base::Callback<void(scoped_ptr<CacheMetadata>)> MetadataCallback;

enum EntryIndex { INDEX_HEADERS = 0, INDEX_RESPONSE_BODY };
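
// Each disk_cache entry backing this cache stores two streams: INDEX_HEADERS
// holds the serialized CacheMetadata protobuf (request/response metadata and
// headers), and INDEX_RESPONSE_BODY holds the raw response body bytes.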

// The maximum size of an individual cache. Ultimately cache size is controlled
// per origin by the quota manager.
const int kMaxCacheBytes = 512 * 1024 * 1024;

void NotReachedCompletionCallback(int rv) {
  NOTREACHED();
}

blink::WebServiceWorkerResponseType ProtoResponseTypeToWebResponseType(
    CacheResponse::ResponseType response_type) {
  switch (response_type) {
    case CacheResponse::BASIC_TYPE:
      return blink::WebServiceWorkerResponseTypeBasic;
    case CacheResponse::CORS_TYPE:
      return blink::WebServiceWorkerResponseTypeCORS;
    case CacheResponse::DEFAULT_TYPE:
      return blink::WebServiceWorkerResponseTypeDefault;
    case CacheResponse::ERROR_TYPE:
      return blink::WebServiceWorkerResponseTypeError;
    case CacheResponse::OPAQUE_TYPE:
      return blink::WebServiceWorkerResponseTypeOpaque;
  }
  NOTREACHED();
  return blink::WebServiceWorkerResponseTypeOpaque;
}

CacheResponse::ResponseType WebResponseTypeToProtoResponseType(
    blink::WebServiceWorkerResponseType response_type) {
  switch (response_type) {
    case blink::WebServiceWorkerResponseTypeBasic:
      return CacheResponse::BASIC_TYPE;
    case blink::WebServiceWorkerResponseTypeCORS:
      return CacheResponse::CORS_TYPE;
    case blink::WebServiceWorkerResponseTypeDefault:
      return CacheResponse::DEFAULT_TYPE;
    case blink::WebServiceWorkerResponseTypeError:
      return CacheResponse::ERROR_TYPE;
    case blink::WebServiceWorkerResponseTypeOpaque:
      return CacheResponse::OPAQUE_TYPE;
  }
  NOTREACHED();
  return CacheResponse::OPAQUE_TYPE;
}

// Copy headers out of a cache entry and into a protobuf. The callback is
// guaranteed to be run.
void ReadMetadata(disk_cache::Entry* entry, const MetadataCallback& callback);
void ReadMetadataDidReadMetadata(
    disk_cache::Entry* entry,
    const MetadataCallback& callback,
    const scoped_refptr<net::IOBufferWithSize>& buffer,
    int rv);

bool VaryMatches(const ServiceWorkerHeaderMap& request,
                 const ServiceWorkerHeaderMap& cached_request,
                 const ServiceWorkerHeaderMap& response) {
  ServiceWorkerHeaderMap::const_iterator vary_iter = response.find("vary");
  if (vary_iter == response.end())
    return true;

  for (const std::string& trimmed :
       base::SplitString(vary_iter->second, ",",
                         base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY)) {
    // A response stored with "Vary: *" never matches.
    if (trimmed == "*")
      return false;

    ServiceWorkerHeaderMap::const_iterator request_iter = request.find(trimmed);
    ServiceWorkerHeaderMap::const_iterator cached_request_iter =
        cached_request.find(trimmed);

    // If the header exists in one but not the other, no match.
    if ((request_iter == request.end()) !=
        (cached_request_iter == cached_request.end()))
      return false;

    // If the header exists in one, it exists in both. Verify that the values
    // are equal.
    if (request_iter != request.end() &&
        request_iter->second != cached_request_iter->second)
      return false;
  }

  return true;
}
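
// Example: if a response was stored with "Vary: accept-encoding", a new
// request only matches when its "accept-encoding" value is identical to the
// one on the cached request (or both requests omit the header), while a
// response stored with "Vary: *" never matches.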

void ReadMetadata(disk_cache::Entry* entry, const MetadataCallback& callback) {
  scoped_refptr<net::IOBufferWithSize> buffer(
      new net::IOBufferWithSize(entry->GetDataSize(INDEX_HEADERS)));

  net::CompletionCallback read_header_callback =
      base::Bind(ReadMetadataDidReadMetadata, entry, callback, buffer);

  int read_rv = entry->ReadData(INDEX_HEADERS, 0, buffer.get(), buffer->size(),
                                read_header_callback);

  if (read_rv != net::ERR_IO_PENDING)
    read_header_callback.Run(read_rv);
}

void ReadMetadataDidReadMetadata(
    disk_cache::Entry* entry,
    const MetadataCallback& callback,
    const scoped_refptr<net::IOBufferWithSize>& buffer,
    int rv) {
  if (rv != buffer->size()) {
    callback.Run(scoped_ptr<CacheMetadata>());
    return;
  }

  scoped_ptr<CacheMetadata> metadata(new CacheMetadata());

  if (!metadata->ParseFromArray(buffer->data(), buffer->size())) {
    callback.Run(scoped_ptr<CacheMetadata>());
    return;
  }

  callback.Run(metadata.Pass());
}

}  // namespace

// The state needed to pass between CacheStorageCache::Keys callbacks.
struct CacheStorageCache::KeysContext {
  explicit KeysContext(const CacheStorageCache::RequestsCallback& callback)
      : original_callback(callback),
        out_keys(new CacheStorageCache::Requests()),
        enumerated_entry(NULL) {}

  ~KeysContext() {
    for (size_t i = 0, max = entries.size(); i < max; ++i)
      entries[i]->Close();
    if (enumerated_entry)
      enumerated_entry->Close();
  }

  // The callback passed to the Keys() function.
  CacheStorageCache::RequestsCallback original_callback;

  // The vector of open entries in the backend.
  Entries entries;

  // The output of the Keys function.
  scoped_ptr<CacheStorageCache::Requests> out_keys;

  // Used for enumerating cache entries.
  scoped_ptr<disk_cache::Backend::Iterator> backend_iterator;
  disk_cache::Entry* enumerated_entry;

 private:
  DISALLOW_COPY_AND_ASSIGN(KeysContext);
};

// The state needed to pass between CacheStorageCache::Put callbacks.
struct CacheStorageCache::PutContext {
  PutContext(
      const GURL& origin,
      scoped_ptr<ServiceWorkerFetchRequest> request,
      scoped_ptr<ServiceWorkerResponse> response,
      scoped_ptr<storage::BlobDataHandle> blob_data_handle,
      const CacheStorageCache::ErrorCallback& callback,
      const scoped_refptr<net::URLRequestContextGetter>& request_context_getter,
      const scoped_refptr<storage::QuotaManagerProxy>& quota_manager_proxy)
      : origin(origin),
        request(request.Pass()),
        response(response.Pass()),
        blob_data_handle(blob_data_handle.Pass()),
        callback(callback),
        request_context_getter(request_context_getter),
        quota_manager_proxy(quota_manager_proxy) {}

  // Input parameters to the Put function.
  GURL origin;
  scoped_ptr<ServiceWorkerFetchRequest> request;
  scoped_ptr<ServiceWorkerResponse> response;
  scoped_ptr<storage::BlobDataHandle> blob_data_handle;
  CacheStorageCache::ErrorCallback callback;
  scoped_refptr<net::URLRequestContextGetter> request_context_getter;
  scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy;
  disk_cache::ScopedEntryPtr cache_entry;

 private:
  DISALLOW_COPY_AND_ASSIGN(PutContext);
};
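
// PutContext keeps the request, response, and blob data handle alive across
// the asynchronous steps of the Put pipeline; ownership of the whole context
// is passed from one disk_cache callback to the next via base::Passed().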

// static
scoped_refptr<CacheStorageCache> CacheStorageCache::CreateMemoryCache(
    const GURL& origin,
    const scoped_refptr<net::URLRequestContextGetter>& request_context_getter,
    const scoped_refptr<storage::QuotaManagerProxy>& quota_manager_proxy,
    base::WeakPtr<storage::BlobStorageContext> blob_context) {
  return make_scoped_refptr(
      new CacheStorageCache(origin, base::FilePath(), request_context_getter,
                            quota_manager_proxy, blob_context));
}

// static
scoped_refptr<CacheStorageCache> CacheStorageCache::CreatePersistentCache(
    const GURL& origin,
    const base::FilePath& path,
    const scoped_refptr<net::URLRequestContextGetter>& request_context_getter,
    const scoped_refptr<storage::QuotaManagerProxy>& quota_manager_proxy,
    base::WeakPtr<storage::BlobStorageContext> blob_context) {
  return make_scoped_refptr(new CacheStorageCache(
      origin, path, request_context_getter, quota_manager_proxy, blob_context));
}

CacheStorageCache::~CacheStorageCache() {
}

base::WeakPtr<CacheStorageCache> CacheStorageCache::AsWeakPtr() {
  return weak_ptr_factory_.GetWeakPtr();
}

void CacheStorageCache::Match(scoped_ptr<ServiceWorkerFetchRequest> request,
                              const ResponseCallback& callback) {
  if (!LazyInitialize()) {
    callback.Run(CACHE_STORAGE_ERROR_STORAGE,
                 scoped_ptr<ServiceWorkerResponse>(),
                 scoped_ptr<storage::BlobDataHandle>());
    return;
  }

  ResponseCallback pending_callback =
      base::Bind(&CacheStorageCache::PendingResponseCallback,
                 weak_ptr_factory_.GetWeakPtr(), callback);
  scheduler_->ScheduleOperation(
      base::Bind(&CacheStorageCache::MatchImpl, weak_ptr_factory_.GetWeakPtr(),
                 base::Passed(request.Pass()), pending_callback));
}

void CacheStorageCache::BatchOperation(
    const std::vector<CacheStorageBatchOperation>& operations,
    const ErrorCallback& callback) {
  if (!LazyInitialize()) {
    callback.Run(CACHE_STORAGE_ERROR_STORAGE);
    return;
  }

  scoped_ptr<ErrorCallback> callback_copy(new ErrorCallback(callback));
  ErrorCallback* callback_ptr = callback_copy.get();
  base::Closure barrier_closure = base::BarrierClosure(
      operations.size(), base::Bind(&CacheStorageCache::BatchDidAllOperations,
                                    this, base::Passed(callback_copy.Pass())));
  ErrorCallback completion_callback =
      base::Bind(&CacheStorageCache::BatchDidOneOperation, this,
                 barrier_closure, callback_ptr);

  for (const auto& operation : operations) {
    switch (operation.operation_type) {
      case CACHE_STORAGE_CACHE_OPERATION_TYPE_PUT:
        Put(operation, completion_callback);
        break;
      case CACHE_STORAGE_CACHE_OPERATION_TYPE_DELETE:
        DCHECK_EQ(1u, operations.size());
        Delete(operation, completion_callback);
        break;
      case CACHE_STORAGE_CACHE_OPERATION_TYPE_UNDEFINED:
        NOTREACHED();
        // TODO(nhiroki): This should return "TypeError".
        // http://crbug.com/425505
        completion_callback.Run(CACHE_STORAGE_ERROR_STORAGE);
        break;
    }
  }
}
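
// The batch helpers below implement a first-error-wins policy: the shared
// ErrorCallback is run (and then reset) by the first operation that fails,
// while the barrier closure fires once every operation has reported, at which
// point BatchDidAllOperations() reports CACHE_STORAGE_OK if the callback was
// never consumed by an error.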

void CacheStorageCache::BatchDidOneOperation(
    const base::Closure& barrier_closure,
    ErrorCallback* callback,
    CacheStorageError error) {
  if (callback->is_null() || error == CACHE_STORAGE_OK) {
    barrier_closure.Run();
    return;
  }
  callback->Run(error);
  callback->Reset();  // Only call the callback once.

  barrier_closure.Run();
}

void CacheStorageCache::BatchDidAllOperations(
    scoped_ptr<ErrorCallback> callback) {
  if (callback->is_null())
    return;
  callback->Run(CACHE_STORAGE_OK);
}

void CacheStorageCache::Keys(const RequestsCallback& callback) {
  if (!LazyInitialize()) {
    callback.Run(CACHE_STORAGE_ERROR_STORAGE, scoped_ptr<Requests>());
    return;
  }

  RequestsCallback pending_callback =
      base::Bind(&CacheStorageCache::PendingRequestsCallback,
                 weak_ptr_factory_.GetWeakPtr(), callback);
  scheduler_->ScheduleOperation(base::Bind(&CacheStorageCache::KeysImpl,
                                           weak_ptr_factory_.GetWeakPtr(),
                                           pending_callback));
}

void CacheStorageCache::Close(const base::Closure& callback) {
  DCHECK_NE(BACKEND_CLOSED, backend_state_)
      << "Was CacheStorageCache::Close() called twice?";

  base::Closure pending_callback =
      base::Bind(&CacheStorageCache::PendingClosure,
                 weak_ptr_factory_.GetWeakPtr(), callback);

  scheduler_->ScheduleOperation(base::Bind(&CacheStorageCache::CloseImpl,
                                           weak_ptr_factory_.GetWeakPtr(),
                                           pending_callback));
}

int64 CacheStorageCache::MemoryBackedSize() const {
  if (backend_state_ != BACKEND_OPEN || !memory_only_)
    return 0;

  scoped_ptr<disk_cache::Backend::Iterator> backend_iter =
      backend_->CreateIterator();
  disk_cache::Entry* entry = nullptr;

  int64 sum = 0;

  std::vector<disk_cache::Entry*> entries;
  int rv = net::OK;
  while ((rv = backend_iter->OpenNextEntry(
              &entry, base::Bind(NotReachedCompletionCallback))) == net::OK) {
    entries.push_back(entry);  // Open the entries without mutating them.
  }
  DCHECK_NE(net::ERR_IO_PENDING, rv)
      << "Memory cache operations should be synchronous.";

  for (disk_cache::Entry* entry : entries) {
    sum += entry->GetDataSize(INDEX_HEADERS) +
           entry->GetDataSize(INDEX_RESPONSE_BODY);
    entry->Close();
  }

  return sum;
}

CacheStorageCache::CacheStorageCache(
    const GURL& origin,
    const base::FilePath& path,
    const scoped_refptr<net::URLRequestContextGetter>& request_context_getter,
    const scoped_refptr<storage::QuotaManagerProxy>& quota_manager_proxy,
    base::WeakPtr<storage::BlobStorageContext> blob_context)
    : origin_(origin),
      path_(path),
      request_context_getter_(request_context_getter),
      quota_manager_proxy_(quota_manager_proxy),
      blob_storage_context_(blob_context),
      backend_state_(BACKEND_UNINITIALIZED),
      scheduler_(new CacheStorageScheduler()),
      initializing_(false),
      memory_only_(path.empty()),
      weak_ptr_factory_(this) {
}

bool CacheStorageCache::LazyInitialize() {
  switch (backend_state_) {
    case BACKEND_UNINITIALIZED:
      InitBackend();
      return true;
    case BACKEND_OPEN:
      return true;
    case BACKEND_CLOSED:
      return false;
  }
  NOTREACHED();
  return false;
}

void CacheStorageCache::MatchImpl(scoped_ptr<ServiceWorkerFetchRequest> request,
                                  const ResponseCallback& callback) {
  DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
  if (backend_state_ != BACKEND_OPEN) {
    callback.Run(CACHE_STORAGE_ERROR_STORAGE,
                 scoped_ptr<ServiceWorkerResponse>(),
                 scoped_ptr<storage::BlobDataHandle>());
    return;
  }

  scoped_ptr<disk_cache::Entry*> scoped_entry_ptr(new disk_cache::Entry*());
  disk_cache::Entry** entry_ptr = scoped_entry_ptr.get();
  ServiceWorkerFetchRequest* request_ptr = request.get();

  net::CompletionCallback open_entry_callback =
      base::Bind(&CacheStorageCache::MatchDidOpenEntry,
                 weak_ptr_factory_.GetWeakPtr(), base::Passed(request.Pass()),
                 callback, base::Passed(scoped_entry_ptr.Pass()));

  int rv = backend_->OpenEntry(request_ptr->url.spec(), entry_ptr,
                               open_entry_callback);
  if (rv != net::ERR_IO_PENDING)
    open_entry_callback.Run(rv);
}

void CacheStorageCache::MatchDidOpenEntry(
    scoped_ptr<ServiceWorkerFetchRequest> request,
    const ResponseCallback& callback,
    scoped_ptr<disk_cache::Entry*> entry_ptr,
    int rv) {
  if (rv != net::OK) {
    callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND,
                 scoped_ptr<ServiceWorkerResponse>(),
                 scoped_ptr<storage::BlobDataHandle>());
    return;
  }
  disk_cache::ScopedEntryPtr entry(*entry_ptr);

  MetadataCallback headers_callback = base::Bind(
      &CacheStorageCache::MatchDidReadMetadata, weak_ptr_factory_.GetWeakPtr(),
      base::Passed(request.Pass()), callback, base::Passed(entry.Pass()));

  ReadMetadata(*entry_ptr, headers_callback);
}

void CacheStorageCache::MatchDidReadMetadata(
    scoped_ptr<ServiceWorkerFetchRequest> request,
    const ResponseCallback& callback,
    disk_cache::ScopedEntryPtr entry,
    scoped_ptr<CacheMetadata> metadata) {
  if (!metadata) {
    callback.Run(CACHE_STORAGE_ERROR_STORAGE,
                 scoped_ptr<ServiceWorkerResponse>(),
                 scoped_ptr<storage::BlobDataHandle>());
    return;
  }

  scoped_ptr<ServiceWorkerResponse> response(new ServiceWorkerResponse(
      request->url, metadata->response().status_code(),
      metadata->response().status_text(),
      ProtoResponseTypeToWebResponseType(metadata->response().response_type()),
      ServiceWorkerHeaderMap(), "", 0, GURL(),
      blink::WebServiceWorkerResponseErrorUnknown));

  if (metadata->response().has_url())
    response->url = GURL(metadata->response().url());

  for (int i = 0; i < metadata->response().headers_size(); ++i) {
    const CacheHeaderMap header = metadata->response().headers(i);
    DCHECK_EQ(std::string::npos, header.name().find('\0'));
    DCHECK_EQ(std::string::npos, header.value().find('\0'));
    response->headers.insert(std::make_pair(header.name(), header.value()));
  }

  ServiceWorkerHeaderMap cached_request_headers;
  for (int i = 0; i < metadata->request().headers_size(); ++i) {
    const CacheHeaderMap header = metadata->request().headers(i);
    DCHECK_EQ(std::string::npos, header.name().find('\0'));
    DCHECK_EQ(std::string::npos, header.value().find('\0'));
    cached_request_headers[header.name()] = header.value();
  }

  if (!VaryMatches(request->headers, cached_request_headers,
                   response->headers)) {
    callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND,
                 scoped_ptr<ServiceWorkerResponse>(),
                 scoped_ptr<storage::BlobDataHandle>());
    return;
  }

  if (entry->GetDataSize(INDEX_RESPONSE_BODY) == 0) {
    callback.Run(CACHE_STORAGE_OK, response.Pass(),
                 scoped_ptr<storage::BlobDataHandle>());
    return;
  }

  if (!blob_storage_context_) {
    callback.Run(CACHE_STORAGE_ERROR_STORAGE,
                 scoped_ptr<ServiceWorkerResponse>(),
                 scoped_ptr<storage::BlobDataHandle>());
    return;
  }

  // Create a blob with the response body data.
  response->blob_size = entry->GetDataSize(INDEX_RESPONSE_BODY);
  response->blob_uuid = base::GenerateGUID();
  storage::BlobDataBuilder blob_data(response->blob_uuid);

  disk_cache::Entry* temp_entry = entry.get();
  blob_data.AppendDiskCacheEntry(
      new CacheStorageCacheDataHandle(this, entry.Pass()), temp_entry,
      INDEX_RESPONSE_BODY);
  scoped_ptr<storage::BlobDataHandle> blob_data_handle(
      blob_storage_context_->AddFinishedBlob(&blob_data));
  callback.Run(CACHE_STORAGE_OK, response.Pass(), blob_data_handle.Pass());
}
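
// Note that the blob returned above does not copy the body out of the cache:
// it references the disk_cache entry directly, and the
// CacheStorageCacheDataHandle attached to it keeps this cache and the entry
// alive for as long as the blob's data is in use.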

void CacheStorageCache::Put(const CacheStorageBatchOperation& operation,
                            const ErrorCallback& callback) {
  DCHECK(BACKEND_OPEN == backend_state_ || initializing_);
  DCHECK_EQ(CACHE_STORAGE_CACHE_OPERATION_TYPE_PUT, operation.operation_type);

  scoped_ptr<ServiceWorkerFetchRequest> request(new ServiceWorkerFetchRequest(
      operation.request.url, operation.request.method,
      operation.request.headers, operation.request.referrer,
      operation.request.is_reload));

  // We don't support streaming for cache.
  DCHECK(operation.response.stream_url.is_empty());
  scoped_ptr<ServiceWorkerResponse> response(new ServiceWorkerResponse(
      operation.response.url, operation.response.status_code,
      operation.response.status_text, operation.response.response_type,
      operation.response.headers, operation.response.blob_uuid,
      operation.response.blob_size, operation.response.stream_url,
      operation.response.error));

  scoped_ptr<storage::BlobDataHandle> blob_data_handle;

  if (!response->blob_uuid.empty()) {
    if (!blob_storage_context_) {
      callback.Run(CACHE_STORAGE_ERROR_STORAGE);
      return;
    }
    blob_data_handle =
        blob_storage_context_->GetBlobDataFromUUID(response->blob_uuid);
    if (!blob_data_handle) {
      callback.Run(CACHE_STORAGE_ERROR_STORAGE);
      return;
    }
  }

  ErrorCallback pending_callback =
      base::Bind(&CacheStorageCache::PendingErrorCallback,
                 weak_ptr_factory_.GetWeakPtr(), callback);

  scoped_ptr<PutContext> put_context(new PutContext(
      origin_, request.Pass(), response.Pass(), blob_data_handle.Pass(),
      pending_callback, request_context_getter_, quota_manager_proxy_));

  scheduler_->ScheduleOperation(base::Bind(&CacheStorageCache::PutImpl,
                                           weak_ptr_factory_.GetWeakPtr(),
                                           base::Passed(put_context.Pass())));
}
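
// Put() runs as a scheduled pipeline: PutImpl() first deletes any existing
// entry for the request URL (PutDidDelete), creates a fresh entry
// (PutDidCreateEntry), writes the serialized metadata to INDEX_HEADERS
// (PutDidWriteHeaders), and finally streams the blob body into
// INDEX_RESPONSE_BODY (PutDidWriteBlobToCache).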

void CacheStorageCache::PutImpl(scoped_ptr<PutContext> put_context) {
  DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
  if (backend_state_ != BACKEND_OPEN) {
    put_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE);
    return;
  }

  scoped_ptr<ServiceWorkerFetchRequest> request_copy(
      new ServiceWorkerFetchRequest(*put_context->request));

  DeleteImpl(request_copy.Pass(), base::Bind(&CacheStorageCache::PutDidDelete,
                                             weak_ptr_factory_.GetWeakPtr(),
                                             base::Passed(put_context.Pass())));
}

void CacheStorageCache::PutDidDelete(scoped_ptr<PutContext> put_context,
                                     CacheStorageError delete_error) {
  if (backend_state_ != BACKEND_OPEN) {
    put_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE);
    return;
  }

  scoped_ptr<disk_cache::Entry*> scoped_entry_ptr(new disk_cache::Entry*());
  disk_cache::Entry** entry_ptr = scoped_entry_ptr.get();
  ServiceWorkerFetchRequest* request_ptr = put_context->request.get();
  disk_cache::Backend* backend_ptr = backend_.get();

  net::CompletionCallback create_entry_callback = base::Bind(
      &CacheStorageCache::PutDidCreateEntry, weak_ptr_factory_.GetWeakPtr(),
      base::Passed(scoped_entry_ptr.Pass()), base::Passed(put_context.Pass()));

  int create_rv = backend_ptr->CreateEntry(request_ptr->url.spec(), entry_ptr,
                                           create_entry_callback);

  if (create_rv != net::ERR_IO_PENDING)
    create_entry_callback.Run(create_rv);
}

void CacheStorageCache::PutDidCreateEntry(
    scoped_ptr<disk_cache::Entry*> entry_ptr,
    scoped_ptr<PutContext> put_context,
    int rv) {
  if (rv != net::OK) {
    put_context->callback.Run(CACHE_STORAGE_ERROR_EXISTS);
    return;
  }
  put_context->cache_entry.reset(*entry_ptr);

  CacheMetadata metadata;
  CacheRequest* request_metadata = metadata.mutable_request();
  request_metadata->set_method(put_context->request->method);
  for (ServiceWorkerHeaderMap::const_iterator it =
           put_context->request->headers.begin();
       it != put_context->request->headers.end(); ++it) {
    DCHECK_EQ(std::string::npos, it->first.find('\0'));
    DCHECK_EQ(std::string::npos, it->second.find('\0'));
    CacheHeaderMap* header_map = request_metadata->add_headers();
    header_map->set_name(it->first);
    header_map->set_value(it->second);
  }

  CacheResponse* response_metadata = metadata.mutable_response();
  response_metadata->set_status_code(put_context->response->status_code);
  response_metadata->set_status_text(put_context->response->status_text);
  response_metadata->set_response_type(
      WebResponseTypeToProtoResponseType(put_context->response->response_type));
  response_metadata->set_url(put_context->response->url.spec());
  for (ServiceWorkerHeaderMap::const_iterator it =
           put_context->response->headers.begin();
       it != put_context->response->headers.end(); ++it) {
    DCHECK_EQ(std::string::npos, it->first.find('\0'));
    DCHECK_EQ(std::string::npos, it->second.find('\0'));
    CacheHeaderMap* header_map = response_metadata->add_headers();
    header_map->set_name(it->first);
    header_map->set_value(it->second);
  }

  scoped_ptr<std::string> serialized(new std::string());
  if (!metadata.SerializeToString(serialized.get())) {
    put_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE);
    return;
  }

  scoped_refptr<net::StringIOBuffer> buffer(
      new net::StringIOBuffer(serialized.Pass()));

  // Get a temporary copy of the entry pointer before passing it in base::Bind.
  disk_cache::Entry* temp_entry_ptr = put_context->cache_entry.get();

  net::CompletionCallback write_headers_callback = base::Bind(
      &CacheStorageCache::PutDidWriteHeaders, weak_ptr_factory_.GetWeakPtr(),
      base::Passed(put_context.Pass()), buffer->size());

  rv = temp_entry_ptr->WriteData(INDEX_HEADERS, 0 /* offset */, buffer.get(),
                                 buffer->size(), write_headers_callback,
                                 true /* truncate */);

  if (rv != net::ERR_IO_PENDING)
    write_headers_callback.Run(rv);
}

void CacheStorageCache::PutDidWriteHeaders(scoped_ptr<PutContext> put_context,
                                           int expected_bytes,
                                           int rv) {
  if (rv != expected_bytes) {
    put_context->cache_entry->Doom();
    put_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE);
    return;
  }

  // The metadata is written, now for the response content. The data is streamed
  // from the blob into the cache entry.

  if (put_context->response->blob_uuid.empty()) {
    if (put_context->quota_manager_proxy.get()) {
      put_context->quota_manager_proxy->NotifyStorageModified(
          storage::QuotaClient::kServiceWorkerCache, put_context->origin,
          storage::kStorageTypeTemporary,
          put_context->cache_entry->GetDataSize(INDEX_HEADERS));
    }

    put_context->callback.Run(CACHE_STORAGE_OK);
    return;
  }

  DCHECK(put_context->blob_data_handle);

  disk_cache::ScopedEntryPtr entry(put_context->cache_entry.Pass());
  put_context->cache_entry = NULL;

  CacheStorageBlobToDiskCache* blob_to_cache =
      new CacheStorageBlobToDiskCache();
  BlobToDiskCacheIDMap::KeyType blob_to_cache_key =
      active_blob_to_disk_cache_writers_.Add(blob_to_cache);

  // Grab some pointers before passing put_context in Bind.
  scoped_refptr<net::URLRequestContextGetter> request_context_getter =
      put_context->request_context_getter;
  scoped_ptr<storage::BlobDataHandle> blob_data_handle =
      put_context->blob_data_handle.Pass();

  blob_to_cache->StreamBlobToCache(
      entry.Pass(), INDEX_RESPONSE_BODY, request_context_getter,
      blob_data_handle.Pass(),
      base::Bind(&CacheStorageCache::PutDidWriteBlobToCache,
                 weak_ptr_factory_.GetWeakPtr(),
                 base::Passed(put_context.Pass()), blob_to_cache_key));
}

void CacheStorageCache::PutDidWriteBlobToCache(
    scoped_ptr<PutContext> put_context,
    BlobToDiskCacheIDMap::KeyType blob_to_cache_key,
    disk_cache::ScopedEntryPtr entry,
    bool success) {
  put_context->cache_entry = entry.Pass();

  active_blob_to_disk_cache_writers_.Remove(blob_to_cache_key);

  if (!success) {
    put_context->cache_entry->Doom();
    put_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE);
    return;
  }

  if (put_context->quota_manager_proxy.get()) {
    put_context->quota_manager_proxy->NotifyStorageModified(
        storage::QuotaClient::kServiceWorkerCache, put_context->origin,
        storage::kStorageTypeTemporary,
        put_context->cache_entry->GetDataSize(INDEX_HEADERS) +
            put_context->cache_entry->GetDataSize(INDEX_RESPONSE_BODY));
  }

  put_context->callback.Run(CACHE_STORAGE_OK);
}

void CacheStorageCache::Delete(const CacheStorageBatchOperation& operation,
                               const ErrorCallback& callback) {
  DCHECK(BACKEND_OPEN == backend_state_ || initializing_);
  DCHECK_EQ(CACHE_STORAGE_CACHE_OPERATION_TYPE_DELETE,
            operation.operation_type);

  scoped_ptr<ServiceWorkerFetchRequest> request(new ServiceWorkerFetchRequest(
      operation.request.url, operation.request.method,
      operation.request.headers, operation.request.referrer,
      operation.request.is_reload));

  ErrorCallback pending_callback =
      base::Bind(&CacheStorageCache::PendingErrorCallback,
                 weak_ptr_factory_.GetWeakPtr(), callback);
  scheduler_->ScheduleOperation(
      base::Bind(&CacheStorageCache::DeleteImpl, weak_ptr_factory_.GetWeakPtr(),
                 base::Passed(request.Pass()), pending_callback));
}

void CacheStorageCache::DeleteImpl(
    scoped_ptr<ServiceWorkerFetchRequest> request,
    const ErrorCallback& callback) {
  DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
  if (backend_state_ != BACKEND_OPEN) {
    callback.Run(CACHE_STORAGE_ERROR_STORAGE);
    return;
  }

  scoped_ptr<disk_cache::Entry*> entry(new disk_cache::Entry*);
  disk_cache::Entry** entry_ptr = entry.get();
  ServiceWorkerFetchRequest* request_ptr = request.get();

  net::CompletionCallback open_entry_callback = base::Bind(
      &CacheStorageCache::DeleteDidOpenEntry, weak_ptr_factory_.GetWeakPtr(),
      origin_, base::Passed(request.Pass()), callback,
      base::Passed(entry.Pass()), quota_manager_proxy_);

  int rv = backend_->OpenEntry(request_ptr->url.spec(), entry_ptr,
                               open_entry_callback);
  if (rv != net::ERR_IO_PENDING)
    open_entry_callback.Run(rv);
}

void CacheStorageCache::DeleteDidOpenEntry(
    const GURL& origin,
    scoped_ptr<ServiceWorkerFetchRequest> request,
    const CacheStorageCache::ErrorCallback& callback,
    scoped_ptr<disk_cache::Entry*> entry_ptr,
    const scoped_refptr<storage::QuotaManagerProxy>& quota_manager_proxy,
    int rv) {
  if (rv != net::OK) {
    callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND);
    return;
  }

  disk_cache::ScopedEntryPtr entry(*entry_ptr);

  if (quota_manager_proxy.get()) {
    quota_manager_proxy->NotifyStorageModified(
        storage::QuotaClient::kServiceWorkerCache, origin,
        storage::kStorageTypeTemporary,
        -1 * (entry->GetDataSize(INDEX_HEADERS) +
              entry->GetDataSize(INDEX_RESPONSE_BODY)));
  }

  entry->Doom();
  callback.Run(CACHE_STORAGE_OK);
}

void CacheStorageCache::KeysImpl(const RequestsCallback& callback) {
  DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
  if (backend_state_ != BACKEND_OPEN) {
    callback.Run(CACHE_STORAGE_ERROR_STORAGE, scoped_ptr<Requests>());
    return;
  }

  // 1. Iterate through all of the entries, open them, and add them to a vector.
  // 2. For each open entry:
  //   2.1. Read the headers into a protobuf.
  //   2.2. Copy the protobuf into a ServiceWorkerFetchRequest (a "key").
  //   2.3. Push the response into a vector of requests to be returned.
  // 3. Return the vector of requests (keys).

  // The entries have to be loaded into a vector first because enumeration loops
  // forever if you read data from a cache entry while enumerating.

  scoped_ptr<KeysContext> keys_context(new KeysContext(callback));

  keys_context->backend_iterator = backend_->CreateIterator();
  disk_cache::Backend::Iterator& iterator = *keys_context->backend_iterator;
  disk_cache::Entry** enumerated_entry = &keys_context->enumerated_entry;

  net::CompletionCallback open_entry_callback = base::Bind(
      &CacheStorageCache::KeysDidOpenNextEntry, weak_ptr_factory_.GetWeakPtr(),
      base::Passed(keys_context.Pass()));

  int rv = iterator.OpenNextEntry(enumerated_entry, open_entry_callback);

  if (rv != net::ERR_IO_PENDING)
    open_entry_callback.Run(rv);
}

void CacheStorageCache::KeysDidOpenNextEntry(
    scoped_ptr<KeysContext> keys_context,
    int rv) {
  if (rv == net::ERR_FAILED) {
    DCHECK(!keys_context->enumerated_entry);
    // Enumeration is complete, extract the requests from the entries.
    Entries::iterator iter = keys_context->entries.begin();
    KeysProcessNextEntry(keys_context.Pass(), iter);
    return;
  }

  if (rv < 0) {
    keys_context->original_callback.Run(CACHE_STORAGE_ERROR_STORAGE,
                                        scoped_ptr<Requests>());
    return;
  }

  if (backend_state_ != BACKEND_OPEN) {
    keys_context->original_callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND,
                                        scoped_ptr<Requests>());
    return;
  }

  // Store the entry.
  keys_context->entries.push_back(keys_context->enumerated_entry);
  keys_context->enumerated_entry = NULL;

  // Enumerate the next entry.
  disk_cache::Backend::Iterator& iterator = *keys_context->backend_iterator;
  disk_cache::Entry** enumerated_entry = &keys_context->enumerated_entry;
  net::CompletionCallback open_entry_callback = base::Bind(
      &CacheStorageCache::KeysDidOpenNextEntry, weak_ptr_factory_.GetWeakPtr(),
      base::Passed(keys_context.Pass()));

  rv = iterator.OpenNextEntry(enumerated_entry, open_entry_callback);

  if (rv != net::ERR_IO_PENDING)
    open_entry_callback.Run(rv);
}

void CacheStorageCache::KeysProcessNextEntry(
    scoped_ptr<KeysContext> keys_context,
    const Entries::iterator& iter) {
  if (iter == keys_context->entries.end()) {
    // All done. Return all of the keys.
    keys_context->original_callback.Run(CACHE_STORAGE_OK,
                                        keys_context->out_keys.Pass());
    return;
  }

  ReadMetadata(*iter, base::Bind(&CacheStorageCache::KeysDidReadMetadata,
                                 weak_ptr_factory_.GetWeakPtr(),
                                 base::Passed(keys_context.Pass()), iter));
}

void CacheStorageCache::KeysDidReadMetadata(
    scoped_ptr<KeysContext> keys_context,
    const Entries::iterator& iter,
    scoped_ptr<CacheMetadata> metadata) {
  disk_cache::Entry* entry = *iter;

  if (metadata) {
    keys_context->out_keys->push_back(ServiceWorkerFetchRequest(
        GURL(entry->GetKey()), metadata->request().method(),
        ServiceWorkerHeaderMap(), Referrer(), false));

    ServiceWorkerHeaderMap& req_headers =
        keys_context->out_keys->back().headers;

    for (int i = 0; i < metadata->request().headers_size(); ++i) {
      const CacheHeaderMap header = metadata->request().headers(i);
      DCHECK_EQ(std::string::npos, header.name().find('\0'));
      DCHECK_EQ(std::string::npos, header.value().find('\0'));
      req_headers.insert(std::make_pair(header.name(), header.value()));
    }
  } else {
    entry->Doom();
  }

  KeysProcessNextEntry(keys_context.Pass(), iter + 1);
}

void CacheStorageCache::CloseImpl(const base::Closure& callback) {
  DCHECK_NE(BACKEND_CLOSED, backend_state_);

  backend_state_ = BACKEND_CLOSED;
  backend_.reset();
  callback.Run();
}

void CacheStorageCache::CreateBackend(const ErrorCallback& callback) {
  // Use APP_CACHE as opposed to DISK_CACHE to prevent cache eviction.
  net::CacheType cache_type = memory_only_ ? net::MEMORY_CACHE : net::APP_CACHE;

  scoped_ptr<ScopedBackendPtr> backend_ptr(new ScopedBackendPtr());

  // Temporary pointer so that backend_ptr can be Pass()'d in Bind below.
  ScopedBackendPtr* backend = backend_ptr.get();

  net::CompletionCallback create_cache_callback =
      base::Bind(&CacheStorageCache::CreateBackendDidCreate,
                 weak_ptr_factory_.GetWeakPtr(), callback,
                 base::Passed(backend_ptr.Pass()));

  // TODO(jkarlin): Use the cache task runner that ServiceWorkerCacheCore
  // has for disk caches.
  int rv = disk_cache::CreateCacheBackend(
      cache_type, net::CACHE_BACKEND_SIMPLE, path_, kMaxCacheBytes,
      false /* force */,
      BrowserThread::GetMessageLoopProxyForThread(BrowserThread::CACHE).get(),
      NULL, backend, create_cache_callback);
  if (rv != net::ERR_IO_PENDING)
    create_cache_callback.Run(rv);
}

void CacheStorageCache::CreateBackendDidCreate(
    const CacheStorageCache::ErrorCallback& callback,
    scoped_ptr<ScopedBackendPtr> backend_ptr,
    int rv) {
  if (rv != net::OK) {
    callback.Run(CACHE_STORAGE_ERROR_STORAGE);
    return;
  }

  backend_ = backend_ptr->Pass();
  callback.Run(CACHE_STORAGE_OK);
}

void CacheStorageCache::InitBackend() {
  DCHECK_EQ(BACKEND_UNINITIALIZED, backend_state_);

  if (initializing_)
    return;

  DCHECK(!scheduler_->ScheduledOperations());
  initializing_ = true;

  scheduler_->ScheduleOperation(base::Bind(
      &CacheStorageCache::CreateBackend, weak_ptr_factory_.GetWeakPtr(),
      base::Bind(&CacheStorageCache::InitDone,
                 weak_ptr_factory_.GetWeakPtr())));
}

void CacheStorageCache::InitDone(CacheStorageError error) {
  initializing_ = false;
  backend_state_ = (error == CACHE_STORAGE_OK && backend_ &&
                    backend_state_ == BACKEND_UNINITIALIZED)
                       ? BACKEND_OPEN
                       : BACKEND_CLOSED;

  UMA_HISTOGRAM_ENUMERATION("ServiceWorkerCache.InitBackendResult", error,
                            CACHE_STORAGE_ERROR_LAST + 1);

  scheduler_->CompleteOperationAndRunNext();
}
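
// The Pending* wrappers below run the caller's callback and then tell the
// scheduler to start the next queued operation. They re-check the weak
// pointer after running the callback because the callback may drop the last
// reference to this cache.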

void CacheStorageCache::PendingClosure(const base::Closure& callback) {
  base::WeakPtr<CacheStorageCache> cache = weak_ptr_factory_.GetWeakPtr();

  callback.Run();
  if (cache)
    scheduler_->CompleteOperationAndRunNext();
}

void CacheStorageCache::PendingErrorCallback(const ErrorCallback& callback,
                                             CacheStorageError error) {
  base::WeakPtr<CacheStorageCache> cache = weak_ptr_factory_.GetWeakPtr();

  callback.Run(error);
  if (cache)
    scheduler_->CompleteOperationAndRunNext();
}

void CacheStorageCache::PendingResponseCallback(
    const ResponseCallback& callback,
    CacheStorageError error,
    scoped_ptr<ServiceWorkerResponse> response,
    scoped_ptr<storage::BlobDataHandle> blob_data_handle) {
  base::WeakPtr<CacheStorageCache> cache = weak_ptr_factory_.GetWeakPtr();

  callback.Run(error, response.Pass(), blob_data_handle.Pass());
  if (cache)
    scheduler_->CompleteOperationAndRunNext();
}

void CacheStorageCache::PendingRequestsCallback(
    const RequestsCallback& callback,
    CacheStorageError error,
    scoped_ptr<Requests> requests) {
  base::WeakPtr<CacheStorageCache> cache = weak_ptr_factory_.GetWeakPtr();

  callback.Run(error, requests.Pass());
  if (cache)
    scheduler_->CompleteOperationAndRunNext();
}

}  // namespace content