// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_job.h"

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/message_loop/message_loop.h"
#include "base/power_monitor/power_monitor.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "net/base/auth.h"
#include "net/base/host_port_pair.h"
#include "net/base/io_buffer.h"
#include "net/base/load_states.h"
#include "net/base/net_errors.h"
#include "net/base/network_delegate.h"
#include "net/filter/filter.h"
#include "net/http/http_response_headers.h"
#include "net/url_request/url_request.h"

namespace net {

URLRequestJob::URLRequestJob(URLRequest* request,
                             NetworkDelegate* network_delegate)
    : request_(request),
      done_(false),
      prefilter_bytes_read_(0),
      postfilter_bytes_read_(0),
      filter_input_byte_count_(0),
      filter_needs_more_output_space_(false),
      filtered_read_buffer_len_(0),
      has_handled_response_(false),
      expected_content_size_(-1),
      deferred_redirect_status_code_(-1),
      network_delegate_(network_delegate),
      weak_factory_(this) {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->AddObserver(this);
}

void URLRequestJob::SetUpload(UploadDataStream* upload) {
}

void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {
}

void URLRequestJob::SetPriority(RequestPriority priority) {
}

void URLRequestJob::Kill() {
  weak_factory_.InvalidateWeakPtrs();
  // Make sure the request is notified that we are done.  We assume that the
  // request took care of setting its error status before calling Kill.
  if (request_)
    NotifyCanceled();
}

void URLRequestJob::DetachRequest() {
  request_ = NULL;
}

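// Note: InvalidateWeakPtrs() in Kill() also cancels any pending
// CompleteNotifyDone() task, because NotifyDone() binds that task through
// weak_factory_.GetWeakPtr() (see below).
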
// This function calls ReadRawData to get stream data. If a filter exists, it
// passes the data to the attached filter, then returns the output from the
// filter back to the caller.
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int* bytes_read) {
  bool rv = false;

  DCHECK_LT(buf_size, 1000000);  // Sanity check.
  DCHECK(buf);
  DCHECK(bytes_read);
  DCHECK(filtered_read_buffer_.get() == NULL);
  DCHECK_EQ(0, filtered_read_buffer_len_);

  *bytes_read = 0;

  // Skip Filter if not present.
  if (!filter_.get()) {
    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
  } else {
    // Save the caller's buffers while we do IO
    // in the filter's buffers.
    filtered_read_buffer_ = buf;
    filtered_read_buffer_len_ = buf_size;

    if (ReadFilteredData(bytes_read)) {
      rv = true;  // We have data to return.

      // It is fine to call DoneReading even if ReadFilteredData receives 0
      // bytes from the net, but we avoid making that call if we know for
      // sure that's the case (ReadRawDataHelper path).
      if (*bytes_read == 0)
        DoneReading();
    } else {
      rv = false;  // Error, or a new IO is pending.
    }
  }

  if (rv && *bytes_read == 0)
    NotifyDone(URLRequestStatus());
  return rv;
}

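// Reading therefore follows one of two paths:
//
//   no filter:  Read() -> ReadRawDataHelper() -> ReadRawData()
//   filter:     Read() -> ReadFilteredData() -> ReadRawDataForFilter()
//                      -> ReadRawDataHelper() -> ReadRawData()
//
// In both cases a true return with *bytes_read == 0 means EOF, and the
// NotifyDone() call above records the successful completion.
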
void URLRequestJob::StopCaching() {
  // Nothing to do here.
}

bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
  // Most job types don't send request headers.
  return false;
}

int64 URLRequestJob::GetTotalReceivedBytes() const {
  return 0;
}

LoadState URLRequestJob::GetLoadState() const {
  return LOAD_STATE_IDLE;
}

UploadProgress URLRequestJob::GetUploadProgress() const {
  return UploadProgress();
}

bool URLRequestJob::GetCharset(std::string* charset) {
  return false;
}

void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {
}

void URLRequestJob::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
  // Only certain request types return more than just request start times.
}

bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
  return false;
}

Filter* URLRequestJob::SetupFilter() const {
  return NULL;
}

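// A minimal sketch of how a derived job might attach a content-decoding
// filter.  URLRequestMyJob and filter_context_ are hypothetical, and the
// exact factory call should be checked against net/filter/filter.h:
//
//   Filter* URLRequestMyJob::SetupFilter() const {
//     std::vector<Filter::FilterType> types;
//     types.push_back(Filter::FILTER_TYPE_GZIP);
//     return Filter::Factory(types, *filter_context_);  // NULL if empty.
//   }
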
bool URLRequestJob::IsRedirectResponse(GURL* location,
                                       int* http_status_code) {
  // For non-HTTP jobs, headers will be null.
  HttpResponseHeaders* headers = request_->response_headers();
  if (!headers)
    return false;

  std::string value;
  if (!headers->IsRedirect(&value))
    return false;

  *location = request_->url().Resolve(value);
  *http_status_code = headers->response_code();
  return true;
}

bool URLRequestJob::CopyFragmentOnRedirect(const GURL& location) const {
  return true;
}

bool URLRequestJob::IsSafeRedirect(const GURL& location) {
  return true;
}

bool URLRequestJob::NeedsAuth() {
  return false;
}

void URLRequestJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* auth_info) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::CancelAuth() {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  // The derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueDespiteLastError() {
  // Implementations should know how to recover from errors they generate.
  // If this code was reached, we are trying to recover from an error that
  // we don't know how to recover from.
  NOTREACHED();
}

void URLRequestJob::FollowDeferredRedirect() {
  DCHECK(deferred_redirect_status_code_ != -1);

  // NOTE: deferred_redirect_url_ may be invalid, and attempting to redirect to
  // such a URL will fail inside FollowRedirect.  The DCHECK above asserts
  // that we called OnReceivedRedirect.

  // It is also possible that FollowRedirect will drop the last reference to
  // this job, so we need to reset our members before calling it.

  GURL redirect_url = deferred_redirect_url_;
  int redirect_status_code = deferred_redirect_status_code_;

  deferred_redirect_url_ = GURL();
  deferred_redirect_status_code_ = -1;

  FollowRedirect(redirect_url, redirect_status_code);
}

void URLRequestJob::ResumeNetworkStart() {
  // This should only be called for HTTP Jobs, and implemented in the derived
  // classes.
  NOTREACHED();
}

bool URLRequestJob::GetMimeType(std::string* mime_type) const {
  return false;
}

int URLRequestJob::GetResponseCode() const {
  return -1;
}

HostPortPair URLRequestJob::GetSocketAddress() const {
  return HostPortPair();
}

void URLRequestJob::OnSuspend() {
  Kill();
}

void URLRequestJob::NotifyURLRequestDestroyed() {
}

URLRequestJob::~URLRequestJob() {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->RemoveObserver(this);
}

void URLRequestJob::NotifyCertificateRequested(
    SSLCertRequestInfo* cert_request_info) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifyCertificateRequested(cert_request_info);
}

void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,
                                              bool fatal) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifySSLCertificateError(ssl_info, fatal);
}

bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanGetCookies(cookie_list);
}

bool URLRequestJob::CanSetCookie(const std::string& cookie_line,
                                 CookieOptions* options) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanSetCookie(cookie_line, options);
}

bool URLRequestJob::CanEnablePrivacyMode() const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanEnablePrivacyMode();
}

CookieStore* URLRequestJob::GetCookieStore() const {
  DCHECK(request_);

  return request_->cookie_store();
}

void URLRequestJob::NotifyBeforeNetworkStart(bool* defer) {
  if (!request_)
    return;

  request_->NotifyBeforeNetworkStart(defer);
}

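// The Notify*/Can* helpers above all follow the same pattern: null-check
// request_ (the job can outlive its URLRequest; see DetachRequest()) and
// then forward to the corresponding URLRequest method.
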
void URLRequestJob::NotifyHeadersComplete() {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  if (has_handled_response_)
    return;

  DCHECK(!request_->status().is_io_pending());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information.  The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = base::Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (request_)
    request_->OnHeadersComplete();

  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    // Redirect response bodies are not read. Notify the transaction
    // so it does not treat being stopped as an error.
    DoneReadingRedirectResponse();

    const GURL& url = request_->url();

    // Move the reference fragment of the old location to the new one if the
    // new one has none. This duplicates mozilla's behavior.
    if (url.is_valid() && url.has_ref() && !new_location.has_ref() &&
        CopyFragmentOnRedirect(new_location)) {
      GURL::Replacements replacements;
      // Reference the |ref| directly out of the original URL to avoid a
      // malloc.
      replacements.SetRef(url.spec().data(),
                          url.parsed_for_possibly_invalid_spec().ref);
      new_location = new_location.ReplaceComponents(replacements);
    }

    bool defer_redirect = false;
    request_->NotifyReceivedRedirect(new_location, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in
    // NotifyReceivedRedirect.
    if (!request_ || !request_->has_delegate())
      return;

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        deferred_redirect_url_ = new_location;
        deferred_redirect_status_code_ = http_status_code;
      } else {
        FollowRedirect(new_location, http_status_code);
      }
      return;
    }
  } else if (NeedsAuth()) {
    scoped_refptr<AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);
    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info.get()) {
      request_->NotifyAuthRequired(auth_info.get());
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  has_handled_response_ = true;
  if (request_->status().is_success())
    filter_.reset(SetupFilter());

  if (!filter_.get()) {
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      base::StringToInt64(content_length, &expected_content_size_);
  }

  request_->NotifyResponseStarted();
}

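// NotifyHeadersComplete() is thus the dispatch point for the three possible
// outcomes of a response: a redirect (possibly deferred), an auth challenge,
// or a normal body, whose content-decoding filter and expected size are set
// up before the delegate hears OnResponseStarted().
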
void URLRequestJob::NotifyReadComplete(int bytes_read) {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
#if 0
  DCHECK(!request_->status().is_io_pending());
#endif

  // The headers should be complete before reads complete.
  DCHECK(has_handled_response_);

  OnRawReadComplete(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (filter_.get()) {
    // Tell the filter that it has more data.
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read)) {
      if (!filter_bytes_read)
        DoneReading();
      request_->NotifyReadCompleted(filter_bytes_read);
    }
  } else {
    request_->NotifyReadCompleted(bytes_read);
  }
  DVLOG(1) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
}

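// Note the asymmetry: with a filter attached, the delegate is told the
// post-filter byte count, so the raw bytes_read value never reaches the
// URLRequest directly.
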
void URLRequestJob::NotifyStartError(const URLRequestStatus& status) {
  DCHECK(!has_handled_response_);
  has_handled_response_ = true;
  if (request_) {
    // There may be relevant information in the response info even in the
    // error case.
    GetResponseInfo(&request_->response_info_);

    request_->set_status(status);
    request_->NotifyResponseStarted();
    // We may have been deleted.
  }
}

void URLRequestJob::NotifyDone(const URLRequestStatus& status) {
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests.  We could receive a request to Cancel, followed shortly
    // by a successful IO.  For tracking the status(), once there is
    // an error, we do not change the status back to success.  To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success()) {
      if (status.status() == URLRequestStatus::FAILED) {
        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
                                                     status.error());
      }
      request_->set_status(status);
    }
  }

  // Complete this notification later.  This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestJob::CompleteNotifyDone,
                 weak_factory_.GetWeakPtr()));
}

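// Posting CompleteNotifyDone through weak_factory_.GetWeakPtr() is the usual
// Chromium pattern for breaking re-entrancy: if the job is destroyed, or
// Kill() invalidates its weak pointers, before the task runs, the callback
// is silently dropped.
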
void URLRequestJob::CompleteNotifyDone() {
  // Check if we should notify the delegate that we're done because of an
  // error.
  if (request_ &&
      !request_->status().is_success() &&
      request_->has_delegate()) {
    // We report the error differently depending on whether we've called
    // OnResponseStarted yet.
    if (has_handled_response_) {
      // We signal the error by calling OnReadComplete with a bytes_read of -1.
      request_->NotifyReadCompleted(-1);
    } else {
      has_handled_response_ = true;
      request_->NotifyResponseStarted();
    }
  }
}

void URLRequestJob::NotifyCanceled() {
  if (!done_) {
    NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
  }
}

void URLRequestJob::NotifyRestartRequired() {
  DCHECK(!has_handled_response_);
  if (GetStatus().status() != URLRequestStatus::CANCELED)
    request_->Restart();
}

void URLRequestJob::OnCallToDelegate() {
  request_->OnCallToDelegate();
}

void URLRequestJob::OnCallToDelegateComplete() {
  request_->OnCallToDelegateComplete();
}

bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
                                int* bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;
  return true;
}

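// A minimal sketch of the contract a derived job implements; the job type
// and members here are hypothetical.  Return true with *bytes_read > 0 for
// data, true with *bytes_read == 0 for EOF, or set an IO_PENDING status and
// return false to complete asynchronously (calling NotifyReadComplete()
// later):
//
//   bool URLRequestMyJob::ReadRawData(IOBuffer* buf, int buf_size,
//                                     int* bytes_read) {
//     int n = std::min(buf_size, static_cast<int>(data_.size()) - offset_);
//     memcpy(buf->data(), data_.data() + offset_, n);
//     offset_ += n;
//     *bytes_read = n;  // 0 signals EOF.
//     return true;      // Completed synchronously.
//   }
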
void URLRequestJob::DoneReading() {
}

void URLRequestJob::DoneReadingRedirectResponse() {
}

void URLRequestJob::FilteredDataRead(int bytes_read) {
  DCHECK(filter_);
  filter_->FlushStreamBuffer(bytes_read);
}

bool URLRequestJob::ReadFilteredData(int* bytes_read) {
  DCHECK(filter_);
  DCHECK(filtered_read_buffer_);
  DCHECK_GT(filtered_read_buffer_len_, 0);
  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // Sanity check.
  DCHECK(!raw_read_buffer_);

  *bytes_read = 0;
  bool rv = false;

  while (true) {
    if (is_done())
      return true;

    if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
      // We don't have any raw data to work with, so read from the transaction.
      int filtered_data_read;
      if (ReadRawDataForFilter(&filtered_data_read)) {
        if (filtered_data_read > 0) {
          // Give data to filter.
          filter_->FlushStreamBuffer(filtered_data_read);
        } else {
          return true;  // EOF.
        }
      } else {
        return false;  // IO Pending (or error).
      }
    }

    if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
        !is_done()) {
      // Get filtered data.
      int filtered_data_len = filtered_read_buffer_len_;
      int output_buffer_size = filtered_data_len;
      Filter::FilterStatus status =
          filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);

      if (filter_needs_more_output_space_ && !filtered_data_len) {
        // filter_needs_more_output_space_ was mistaken... there are no more
        // bytes and we should have at least tried to fill up the filter's
        // input buffer. Correct the state, and try again.
        filter_needs_more_output_space_ = false;
        continue;
      }
      filter_needs_more_output_space_ =
          (filtered_data_len == output_buffer_size);

      switch (status) {
        case Filter::FILTER_DONE: {
          filter_needs_more_output_space_ = false;
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_NEED_MORE_DATA: {
          // We have finished filtering all data currently in the buffer.
          // There might be some space left in the output buffer. One can
          // consider reading more data from the stream to feed the filter
          // and filling up the output buffer. This leads to more complicated
          // buffer management and data notification mechanisms.
          // We can revisit this issue if there is a real perf need.
          if (filtered_data_len > 0) {
            *bytes_read = filtered_data_len;
            postfilter_bytes_read_ += filtered_data_len;
            rv = true;
          } else {
            // Read again since we haven't received enough data yet (e.g., we
            // may not have a complete gzip header yet).
            continue;
          }
          break;
        }
        case Filter::FILTER_OK: {
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_ERROR: {
          DVLOG(1) << __FUNCTION__ << "() "
                   << "\"" << (request_ ? request_->url().spec() : "???")
                   << "\"" << " Filter Error";
          filter_needs_more_output_space_ = false;
          NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                                      ERR_CONTENT_DECODING_FAILED));
          rv = false;
          break;
        }
        default: {
          NOTREACHED();
          filter_needs_more_output_space_ = false;
          rv = false;
          break;
        }
      }

      // If logging all bytes is enabled, log the filtered bytes read.
      if (rv && request() && request()->net_log().IsLoggingBytes() &&
          filtered_data_len > 0) {
        request()->net_log().AddByteTransferEvent(
            NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ,
            filtered_data_len, filtered_read_buffer_->data());
      }
    } else {
      // We are done, or there is no data left.
      rv = true;
    }
    break;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to save the
    // caller's buffers. Release our reference.
    filtered_read_buffer_ = NULL;
    filtered_read_buffer_len_ = 0;
  }
  return rv;
}

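// The loop above exists so that FILTER_NEED_MORE_DATA with no output yet can
// trigger another raw read without recursion.  FILTER_OK and FILTER_DONE hand
// decoded bytes back to the caller, while a completely filled output buffer
// sets filter_needs_more_output_space_ so the next pass drains the filter
// before more raw data is read.
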
void URLRequestJob::DestroyFilters() {
  filter_.reset();
}

const URLRequestStatus URLRequestJob::GetStatus() {
  if (request_)
    return request_->status();
  // If the request is gone, we must be cancelled.
  return URLRequestStatus(URLRequestStatus::CANCELED,
                          ERR_ABORTED);
}

void URLRequestJob::SetStatus(const URLRequestStatus& status) {
  if (request_)
    request_->set_status(status);
}

void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
  request_->proxy_server_ = proxy_server;
}

bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
  bool rv = false;

  DCHECK(bytes_read);
  DCHECK(filter_.get());

  *bytes_read = 0;

  // Get more pre-filtered data if needed.
  // TODO(mbelshe): is it possible that the filter needs *MORE* data
  //    when there is some data already in the buffer?
  if (!filter_->stream_data_len() && !is_done()) {
    IOBuffer* stream_buffer = filter_->stream_buffer();
    int stream_buffer_size = filter_->stream_buffer_size();
    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
  }
  return rv;
}

bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
                                      int* bytes_read) {
  DCHECK(!request_->status().is_io_pending());
  DCHECK(raw_read_buffer_.get() == NULL);

  // Keep a pointer to the read buffer, so we have access to it in the
  // OnRawReadComplete() callback in the event that the read completes
  // asynchronously.
  raw_read_buffer_ = buf;
  bool rv = ReadRawData(buf, buf_size, bytes_read);

  if (!request_->status().is_io_pending()) {
    // If the read completes synchronously, either success or failure,
    // invoke the OnRawReadComplete callback so we can account for the
    // completed read.
    OnRawReadComplete(*bytes_read);
  }
  return rv;
}

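// If ReadRawData() completed asynchronously instead (status left at
// IO_PENDING), the derived job calls NotifyReadComplete() when the data
// arrives, and OnRawReadComplete() runs there against the saved
// raw_read_buffer_.
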
void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
  int rv = request_->Redirect(location, http_status_code);
  if (rv != OK)
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}

void URLRequestJob::OnRawReadComplete(int bytes_read) {
  DCHECK(raw_read_buffer_.get());
  // If |filter_| is non-NULL, bytes will be logged after it is applied
  // instead.
  if (!filter_.get() && request() && request()->net_log().IsLoggingBytes() &&
      bytes_read > 0) {
    request()->net_log().AddByteTransferEvent(
        NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
        bytes_read, raw_read_buffer_->data());
  }

  if (bytes_read > 0) {
    RecordBytesRead(bytes_read);
  }
  raw_read_buffer_ = NULL;
}

void URLRequestJob::RecordBytesRead(int bytes_read) {
  filter_input_byte_count_ += bytes_read;
  prefilter_bytes_read_ += bytes_read;
  if (!filter_.get())
    postfilter_bytes_read_ += bytes_read;
  DVLOG(2) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
  if (network_delegate_)
    network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
}

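// Byte accounting: prefilter_bytes_read_ counts raw network bytes, while
// postfilter_bytes_read_ counts bytes after content decoding.  With no
// filter the two totals advance together here; with a filter, postfilter
// totals are updated in ReadFilteredData() instead.
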
bool URLRequestJob::FilterHasData() {
  return filter_.get() && filter_->stream_data_len();
}

void URLRequestJob::UpdatePacketReadTimes() {
}

}  // namespace net