// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_job.h"

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/metrics/histogram_macros.h"
#include "base/power_monitor/power_monitor.h"
#include "base/profiler/scoped_tracker.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/values.h"
#include "net/base/auth.h"
#include "net/base/host_port_pair.h"
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/base/load_states.h"
#include "net/base/net_errors.h"
#include "net/base/network_delegate.h"
#include "net/base/network_quality_estimator.h"
#include "net/filter/filter.h"
#include "net/http/http_response_headers.h"
#include "net/url_request/url_request_context.h"

namespace net {

namespace {

// Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event.
scoped_ptr<base::Value> FiltersSetCallback(
    Filter* filter,
    NetLogCaptureMode /* capture_mode */) {
  scoped_ptr<base::DictionaryValue> event_params(new base::DictionaryValue());
  event_params->SetString("filters", filter->OrderedFilterList());
  return event_params.Pass();
}

std::string ComputeMethodForRedirect(const std::string& method,
                                     int http_status_code) {
  // For 303 redirects, all request methods except HEAD are converted to GET,
  // as per the latest httpbis draft. The draft also allows POST requests to
  // be converted to GETs when following 301/302 redirects, for historical
  // reasons. Most major browsers do this and so shall we. Both RFC 2616 and
  // the httpbis draft say to prompt the user to confirm the generation of new
  // requests, other than GET and HEAD requests, but IE omits these prompts and
  // so shall we.
  // See:
  // https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-17#section-7.3
  if ((http_status_code == 303 && method != "HEAD") ||
      ((http_status_code == 301 || http_status_code == 302) &&
       method == "POST")) {
    return "GET";
  }
  return method;
}

}  // namespace

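// A URLRequestJob registers itself as a base::PowerMonitor observer for its
// entire lifetime (the observer is removed again in the destructor) so that
// it can react to system suspend notifications; see OnSuspend() below.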
URLRequestJob::URLRequestJob(URLRequest* request,
                             NetworkDelegate* network_delegate)
    : request_(request),
      done_(false),
      prefilter_bytes_read_(0),
      postfilter_bytes_read_(0),
      filter_needs_more_output_space_(false),
      filtered_read_buffer_len_(0),
      has_handled_response_(false),
      expected_content_size_(-1),
      network_delegate_(network_delegate),
      weak_factory_(this) {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->AddObserver(this);
}

void URLRequestJob::SetUpload(UploadDataStream* upload) {
}

void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {
}

void URLRequestJob::SetPriority(RequestPriority priority) {
}

void URLRequestJob::Kill() {
  weak_factory_.InvalidateWeakPtrs();
  // Make sure the request is notified that we are done. We assume that the
  // request took care of setting its error status before calling Kill.
  if (request_)
    NotifyCanceled();
}

void URLRequestJob::DetachRequest() {
  request_ = NULL;
}

// This function calls ReadData to get stream data. If a filter exists, passes
// the data to the attached filter. Then returns the output from filter back to
// the caller.
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
  bool rv = false;

  DCHECK_LT(buf_size, 1000000);  // Sanity check.
  DCHECK(buf);
  DCHECK(bytes_read);
  DCHECK(filtered_read_buffer_.get() == NULL);
  DCHECK_EQ(0, filtered_read_buffer_len_);

  *bytes_read = 0;

  // Skip Filter if not present.
  if (!filter_.get()) {
    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
  } else {
    // Save the caller's buffers while we do IO
    // in the filter's buffers.
    filtered_read_buffer_ = buf;
    filtered_read_buffer_len_ = buf_size;

    if (ReadFilteredData(bytes_read)) {
      rv = true;  // We have data to return.

      // It is fine to call DoneReading even if ReadFilteredData receives 0
      // bytes from the net, but we avoid making that call if we know for
      // sure that's the case (ReadRawDataHelper path).
      if (*bytes_read == 0)
        DoneReading();
    } else {
      rv = false;  // Error, or a new IO is pending.
    }
  }

  if (rv && *bytes_read == 0)
    NotifyDone(URLRequestStatus());
  return rv;
}

void URLRequestJob::StopCaching() {
  // Nothing to do here.
}

bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
  // Most job types don't send request headers.
  return false;
}

int64 URLRequestJob::GetTotalReceivedBytes() const {
  return 0;
}

LoadState URLRequestJob::GetLoadState() const {
  return LOAD_STATE_IDLE;
}

UploadProgress URLRequestJob::GetUploadProgress() const {
  return UploadProgress();
}

bool URLRequestJob::GetCharset(std::string* charset) {
  return false;
}

void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {
}

void URLRequestJob::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
  // Only certain request types return more than just request start times.
}

bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
  return false;
}

Filter* URLRequestJob::SetupFilter() const {
  return NULL;
}

bool URLRequestJob::IsRedirectResponse(GURL* location,
                                       int* http_status_code) {
  // For non-HTTP jobs, headers will be null.
  HttpResponseHeaders* headers = request_->response_headers();
  if (!headers)
    return false;

  std::string value;
  if (!headers->IsRedirect(&value))
    return false;

  *location = request_->url().Resolve(value);
  *http_status_code = headers->response_code();
  return true;
}

bool URLRequestJob::CopyFragmentOnRedirect(const GURL& location) const {
  return true;
}

bool URLRequestJob::IsSafeRedirect(const GURL& location) {
  return true;
}

bool URLRequestJob::NeedsAuth() {
  return false;
}

void URLRequestJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* auth_info) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::CancelAuth() {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  // The derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueDespiteLastError() {
  // Implementations should know how to recover from errors they generate.
  // If this code was reached, we are trying to recover from an error that
  // we don't know how to recover from.
  NOTREACHED();
}

void URLRequestJob::FollowDeferredRedirect() {
  DCHECK_NE(-1, deferred_redirect_info_.status_code);

  // NOTE: deferred_redirect_info_ may be invalid, and attempting to follow it
  // will fail inside FollowRedirect. The DCHECK above asserts that we called
  // OnReceivedRedirect.

  // It is also possible that FollowRedirect will drop the last reference to
  // this job, so we need to reset our members before calling it.

  RedirectInfo redirect_info = deferred_redirect_info_;
  deferred_redirect_info_ = RedirectInfo();
  FollowRedirect(redirect_info);
}

void URLRequestJob::ResumeNetworkStart() {
  // This should only be called for HTTP Jobs, and implemented in the derived
  // class.
  NOTREACHED();
}

bool URLRequestJob::GetMimeType(std::string* mime_type) const {
  return false;
}

int URLRequestJob::GetResponseCode() const {
  return -1;
}

HostPortPair URLRequestJob::GetSocketAddress() const {
  return HostPortPair();
}

void URLRequestJob::OnSuspend() {
  Kill();
}

void URLRequestJob::NotifyURLRequestDestroyed() {
}

void URLRequestJob::GetConnectionAttempts(ConnectionAttempts* out) const {
  out->clear();
}

GURL URLRequestJob::ComputeReferrerForRedirect(
    URLRequest::ReferrerPolicy policy,
    const std::string& referrer,
    const GURL& redirect_destination) {
  GURL original_referrer(referrer);
  bool secure_referrer_but_insecure_destination =
      original_referrer.SchemeIsCryptographic() &&
      !redirect_destination.SchemeIsCryptographic();
  bool same_origin =
      original_referrer.GetOrigin() == redirect_destination.GetOrigin();
  switch (policy) {
    case URLRequest::CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE:
      return secure_referrer_but_insecure_destination ? GURL()
                                                      : original_referrer;

    case URLRequest::REDUCE_REFERRER_GRANULARITY_ON_TRANSITION_CROSS_ORIGIN:
      if (same_origin) {
        return original_referrer;
      } else if (secure_referrer_but_insecure_destination) {
        return GURL();
      } else {
        return original_referrer.GetOrigin();
      }

    case URLRequest::ORIGIN_ONLY_ON_TRANSITION_CROSS_ORIGIN:
      return same_origin ? original_referrer : original_referrer.GetOrigin();

    case URLRequest::NEVER_CLEAR_REFERRER:
      return original_referrer;
  }

  NOTREACHED();
  return GURL();
}

URLRequestJob::~URLRequestJob() {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->RemoveObserver(this);
}

void URLRequestJob::NotifyCertificateRequested(
    SSLCertRequestInfo* cert_request_info) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifyCertificateRequested(cert_request_info);
}

void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,
                                              bool fatal) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifySSLCertificateError(ssl_info, fatal);
}

bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanGetCookies(cookie_list);
}

bool URLRequestJob::CanSetCookie(const std::string& cookie_line,
                                 CookieOptions* options) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanSetCookie(cookie_line, options);
}

bool URLRequestJob::CanEnablePrivacyMode() const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanEnablePrivacyMode();
}

void URLRequestJob::NotifyBeforeNetworkStart(bool* defer) {
  if (!request_)
    return;

  request_->NotifyBeforeNetworkStart(defer);
}

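// Invoked once response headers are available (typically by the subclass).
// Handles redirect and authentication-challenge responses before ordinary
// ones: redirects are followed (or deferred) via RedirectInfo, auth
// challenges are surfaced to the URLRequest, and for plain responses the
// content filter chain is set up before NotifyResponseStarted() is called.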
void URLRequestJob::NotifyHeadersComplete() {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  if (has_handled_response_)
    return;

  DCHECK(!request_->status().is_io_pending());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information. The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = base::Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this'). After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed. self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (request_)
    request_->OnHeadersComplete();

  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    // Redirect response bodies are not read. Notify the transaction
    // so it does not treat being stopped as an error.
    DoneReadingRedirectResponse();

    RedirectInfo redirect_info =
        ComputeRedirectInfo(new_location, http_status_code);

    bool defer_redirect = false;
    request_->NotifyReceivedRedirect(redirect_info, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in
    // NotifyReceivedRedirect
    if (!request_ || !request_->has_delegate())
      return;

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        deferred_redirect_info_ = redirect_info;
      } else {
        FollowRedirect(redirect_info);
      }
      return;
    }
  } else if (NeedsAuth()) {
    scoped_refptr<AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);

    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info.get()) {
      request_->NotifyAuthRequired(auth_info.get());
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  has_handled_response_ = true;
  if (request_->status().is_success())
    filter_.reset(SetupFilter());

  if (!filter_.get()) {
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      base::StringToInt64(content_length, &expected_content_size_);
  } else {
    request_->net_log().AddEvent(
        NetLog::TYPE_URL_REQUEST_FILTERS_SET,
        base::Bind(&FiltersSetCallback, base::Unretained(filter_.get())));
  }

  request_->NotifyResponseStarted();
}

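// Invoked when a raw read has completed. Pushes the newly read bytes through
// the filter chain when one is attached, then reports the (post-filter) byte
// count to the URLRequest.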
void URLRequestJob::NotifyReadComplete(int bytes_read) {
  // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "475755 URLRequestJob::NotifyReadComplete"));

  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
#if 0
  DCHECK(!request_->status().is_io_pending());
#endif
  // The headers should be complete before reads complete
  DCHECK(has_handled_response_);

  OnRawReadComplete(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this'). After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed. self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (filter_.get()) {
    // Tell the filter that it has more data
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read)) {
      if (!filter_bytes_read)
        DoneReading();
      request_->NotifyReadCompleted(filter_bytes_read);
    }
  } else {
    request_->NotifyReadCompleted(bytes_read);
  }
  DVLOG(1) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
}

void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
  DCHECK(!has_handled_response_);
  has_handled_response_ = true;
  if (request_) {
    // There may be relevant information in the response info even in the
    // error case.
    GetResponseInfo(&request_->response_info_);

    request_->set_status(status);
    request_->NotifyResponseStarted();
    // We may have been deleted.
  }
}

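// Marks the job as done and records the final status on the URLRequest. The
// delegate is not notified synchronously; CompleteNotifyDone() is posted as a
// task so that a job finishing inside a synchronous call cannot re-enter the
// delegate.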
void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests. We could receive a request to Cancel, followed shortly
    // by a successful IO. For tracking the status(), once there is
    // an error, we do not change the status back to success. To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success()) {
      if (status.status() == URLRequestStatus::FAILED) {
        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
                                                     status.error());
      }
      request_->set_status(status);
    }

    // If the request succeeded (and wasn't cancelled) and the response code
    // was 4xx or 5xx, record whether or not the main frame was blank. This is
    // intended to be a short-lived histogram, used to figure out how important
    // fixing http://crbug.com/331745 is.
    if (request_->status().is_success()) {
      int response_code = GetResponseCode();
      if (400 <= response_code && response_code <= 599) {
        bool page_has_content = (postfilter_bytes_read_ != 0);
        if (request_->load_flags() & net::LOAD_MAIN_FRAME) {
          UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame",
                                page_has_content);
        } else {
          UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame",
                                page_has_content);
        }
      }
    }
  }

  // Complete this notification later. This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind(&URLRequestJob::CompleteNotifyDone,
                            weak_factory_.GetWeakPtr()));
}

void URLRequestJob::CompleteNotifyDone() {
  // Check if we should notify the delegate that we're done because of an error.
  if (request_ &&
      !request_->status().is_success() &&
      request_->has_delegate()) {
    // We report the error differently depending on whether we've called
    // OnResponseStarted yet.
    if (has_handled_response_) {
      // We signal the error by calling OnReadComplete with a bytes_read of -1.
      request_->NotifyReadCompleted(-1);
    } else {
      has_handled_response_ = true;
      request_->NotifyResponseStarted();
    }
  }
}

void URLRequestJob::NotifyCanceled() {
  if (!done_) {
    NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
  }
}

void URLRequestJob::NotifyRestartRequired() {
  DCHECK(!has_handled_response_);
  if (GetStatus().status() != URLRequestStatus::CANCELED)
    request_->Restart();
}

void URLRequestJob::OnCallToDelegate() {
  request_->OnCallToDelegate();
}

void URLRequestJob::OnCallToDelegateComplete() {
  request_->OnCallToDelegateComplete();
}

bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
                                int* bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;
  return true;
}

void URLRequestJob::DoneReading() {
  // Do nothing.
}

void URLRequestJob::DoneReadingRedirectResponse() {
}

void URLRequestJob::FilteredDataRead(int bytes_read) {
  DCHECK(filter_.get());
  filter_->FlushStreamBuffer(bytes_read);
}

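// Pulls data through the attached filter into |filtered_read_buffer_|.
// Returns true when filtered bytes are available in the caller's buffer (or
// the stream is complete), and false when a raw read is still pending or an
// error occurred.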
bool URLRequestJob::ReadFilteredData(int* bytes_read) {
  DCHECK(filter_.get());
  DCHECK(filtered_read_buffer_.get());
  DCHECK_GT(filtered_read_buffer_len_, 0);
  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // Sanity check.
  DCHECK(!raw_read_buffer_.get());

  *bytes_read = 0;
  bool rv = false;

  for (;;) {
    if (is_done())
      return true;

    if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
      // We don't have any raw data to work with, so read from the transaction.
      int filtered_data_read;
      if (ReadRawDataForFilter(&filtered_data_read)) {
        if (filtered_data_read > 0) {
          // Give data to filter.
          filter_->FlushStreamBuffer(filtered_data_read);
        } else {
          return true;  // EOF.
        }
      } else {
        return false;  // IO Pending (or error).
      }
    }

    if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
        !is_done()) {
      // Get filtered data.
      int filtered_data_len = filtered_read_buffer_len_;
      int output_buffer_size = filtered_data_len;
      Filter::FilterStatus status =
          filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);

      if (filter_needs_more_output_space_ && !filtered_data_len) {
        // filter_needs_more_output_space_ was mistaken... there are no more
        // bytes and we should have at least tried to fill up the filter's input
        // buffer. Correct the state, and try again.
        filter_needs_more_output_space_ = false;
        continue;
      }
      filter_needs_more_output_space_ =
          (filtered_data_len == output_buffer_size);

      switch (status) {
        case Filter::FILTER_DONE: {
          filter_needs_more_output_space_ = false;
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_NEED_MORE_DATA: {
          // We have finished filtering all data currently in the buffer.
          // There might be some space left in the output buffer. One can
          // consider reading more data from the stream to feed the filter
          // and filling up the output buffer. This leads to more complicated
          // buffer management and data notification mechanisms.
          // We can revisit this issue if there is a real perf need.
          if (filtered_data_len > 0) {
            *bytes_read = filtered_data_len;
            postfilter_bytes_read_ += filtered_data_len;
            rv = true;
          } else {
            // Read again since we haven't received enough data yet (e.g., we
            // may not have a complete gzip header yet).
            continue;
          }
          break;
        }
        case Filter::FILTER_OK: {
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_ERROR: {
          DVLOG(1) << __FUNCTION__ << "() "
                   << "\"" << (request_ ? request_->url().spec() : "???")
                   << "\"" << " Filter Error";
          filter_needs_more_output_space_ = false;
          NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                                      ERR_CONTENT_DECODING_FAILED));
          rv = false;
          break;
        }
        default: {
          NOTREACHED();
          filter_needs_more_output_space_ = false;
          rv = false;
          break;
        }
      }

      // If logging all bytes is enabled, log the filtered bytes read.
      if (rv && request() && filtered_data_len > 0 &&
          request()->net_log().IsCapturing()) {
        request()->net_log().AddByteTransferEvent(
            NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len,
            filtered_read_buffer_->data());
      }
    } else {
      // we are done, or there is no data left.
      rv = true;
    }
    break;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to save the
    // caller's buffers. Release our reference.
    filtered_read_buffer_ = NULL;
    filtered_read_buffer_len_ = 0;
  }
  return rv;
}

void URLRequestJob::DestroyFilters() {
  filter_.reset();
}

const URLRequestStatus URLRequestJob::GetStatus() {
  if (request_)
    return request_->status();
  // If the request is gone, we must be cancelled.
  return URLRequestStatus(URLRequestStatus::CANCELED,
                          ERR_ABORTED);
}

void URLRequestJob::SetStatus(const URLRequestStatus &status) {
  if (request_)
    request_->set_status(status);
}

void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
  request_->proxy_server_ = proxy_server;
}

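// Reads more raw (pre-filter) bytes into the filter's stream buffer when the
// filter has run out of input; returns the result of ReadRawDataHelper(), or
// false if no read was started.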
bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
  bool rv = false;

  DCHECK(bytes_read);
  DCHECK(filter_.get());

  *bytes_read = 0;

  // Get more pre-filtered data if needed.
  // TODO(mbelshe): is it possible that the filter needs *MORE* data
  // when there is some data already in the buffer?
  if (!filter_->stream_data_len() && !is_done()) {
    IOBuffer* stream_buffer = filter_->stream_buffer();
    int stream_buffer_size = filter_->stream_buffer_size();
    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
  }
  return rv;
}

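// Thin wrapper around ReadRawData() that remembers the destination buffer in
// |raw_read_buffer_| for OnRawReadComplete(), and that invokes
// OnRawReadComplete() immediately when the read finishes synchronously.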
bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
                                      int* bytes_read) {
  DCHECK(!request_->status().is_io_pending());
  DCHECK(raw_read_buffer_.get() == NULL);

  // Keep a pointer to the read buffer, so we have access to it in the
  // OnRawReadComplete() callback in the event that the read completes
  // asynchronously.
  raw_read_buffer_ = buf;
  bool rv = ReadRawData(buf, buf_size, bytes_read);

  if (!request_->status().is_io_pending()) {
    // If the read completes synchronously, either success or failure,
    // invoke the OnRawReadComplete callback so we can account for the
    // completed read.
    OnRawReadComplete(*bytes_read);
  }
  return rv;
}

void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
  int rv = request_->Redirect(redirect_info);
  if (rv != OK)
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}

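// Bookkeeping for a completed raw read: logs the raw bytes to the NetLog when
// no filter is attached (filtered bytes are logged separately), updates the
// byte counters via RecordBytesRead(), and releases |raw_read_buffer_|.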
void URLRequestJob::OnRawReadComplete(int bytes_read) {
  DCHECK(raw_read_buffer_.get());
  // If |filter_| is non-NULL, bytes will be logged after it is applied instead.
  if (!filter_.get() && request() && bytes_read > 0 &&
      request()->net_log().IsCapturing()) {
    request()->net_log().AddByteTransferEvent(
        NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
        bytes_read, raw_read_buffer_->data());
  }

  if (bytes_read > 0) {
    RecordBytesRead(bytes_read);
  }
  raw_read_buffer_ = NULL;
}

void URLRequestJob::RecordBytesRead(int bytes_read) {
  DCHECK_GT(bytes_read, 0);
  prefilter_bytes_read_ += bytes_read;

  // Notify NetworkQualityEstimator.
  // TODO(tbansal): Move this to url_request_http_job.cc. This may catch
  // Service Worker jobs twice.
  if (request_ && request_->context()->network_quality_estimator()) {
    request_->context()->network_quality_estimator()->NotifyDataReceived(
        *request_, prefilter_bytes_read_, bytes_read);
  }

  if (!filter_.get())
    postfilter_bytes_read_ += bytes_read;
  DVLOG(2) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
  if (network_delegate_)
    network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
}

bool URLRequestJob::FilterHasData() {
  return filter_.get() && filter_->stream_data_len();
}

void URLRequestJob::UpdatePacketReadTimes() {
}

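// Builds the RedirectInfo for a redirect to |location| with the given status
// code: the request method is chosen via ComputeMethodForRedirect(), the
// original URL's fragment is carried over when the new URL has none, the
// first-party-for-cookies URL is updated per the request's policy, and the
// referrer is recomputed via ComputeReferrerForRedirect().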
RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location,
                                                int http_status_code) {
  const GURL& url = request_->url();

  RedirectInfo redirect_info;

  redirect_info.status_code = http_status_code;

  // The request method may change, depending on the status code.
  redirect_info.new_method =
      ComputeMethodForRedirect(request_->method(), http_status_code);

  // Move the reference fragment of the old location to the new one if the
  // new one has none. This duplicates mozilla's behavior.
  if (url.is_valid() && url.has_ref() && !location.has_ref() &&
      CopyFragmentOnRedirect(location)) {
    GURL::Replacements replacements;
    // Reference the |ref| directly out of the original URL to avoid a
    // malloc.
    replacements.SetRef(url.spec().data(),
                        url.parsed_for_possibly_invalid_spec().ref);
    redirect_info.new_url = location.ReplaceComponents(replacements);
  } else {
    redirect_info.new_url = location;
  }

  // Update the first-party URL if appropriate.
  if (request_->first_party_url_policy() ==
          URLRequest::UPDATE_FIRST_PARTY_URL_ON_REDIRECT) {
    redirect_info.new_first_party_for_cookies = redirect_info.new_url;
  } else {
    redirect_info.new_first_party_for_cookies =
        request_->first_party_for_cookies();
  }

  // Alter the referrer if redirecting cross-origin (especially HTTP->HTTPS).
  redirect_info.new_referrer =
      ComputeReferrerForRedirect(request_->referrer_policy(),
                                 request_->referrer(),
                                 redirect_info.new_url).spec();

  return redirect_info;
}

}  // namespace net