// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_job.h"

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/metrics/histogram_macros.h"
#include "base/power_monitor/power_monitor.h"
#include "base/profiler/scoped_tracker.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/values.h"
#include "net/base/auth.h"
#include "net/base/host_port_pair.h"
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/base/load_states.h"
#include "net/base/net_errors.h"
#include "net/base/network_delegate.h"
#include "net/base/network_quality_estimator.h"
#include "net/filter/filter.h"
#include "net/http/http_response_headers.h"
#include "net/url_request/url_request_context.h"

namespace net {

namespace {

// Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event.
scoped_ptr<base::Value> FiltersSetCallback(
    Filter* filter,
    NetLogCaptureMode /* capture_mode */) {
  scoped_ptr<base::DictionaryValue> event_params(new base::DictionaryValue());
  event_params->SetString("filters", filter->OrderedFilterList());
  return event_params.Pass();
}

std::string ComputeMethodForRedirect(const std::string& method,
                                     int http_status_code) {
  // For 303 redirects, all request methods except HEAD are converted to GET,
  // as per the latest httpbis draft. The draft also allows POST requests to
  // be converted to GETs when following 301/302 redirects, for historical
  // reasons. Most major browsers do this and so shall we. Both RFC 2616 and
  // the httpbis draft say to prompt the user to confirm the generation of new
  // requests, other than GET and HEAD requests, but IE omits these prompts and
  // so shall we.
  // See:
  // https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-17#section-7.3
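  // Illustrative outcomes implied by the rule below (not an exhaustive
  // table): POST + 301/302/303 becomes GET, while HEAD + 303 and PUT + 302
  // keep their original methods.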
  if ((http_status_code == 303 && method != "HEAD") ||
      ((http_status_code == 301 || http_status_code == 302) &&
       method == "POST")) {
    return "GET";
  }
  return method;
}

}  // namespace

URLRequestJob::URLRequestJob(URLRequest* request,
                             NetworkDelegate* network_delegate)
    : request_(request),
      done_(false),
      prefilter_bytes_read_(0),
      postfilter_bytes_read_(0),
      filter_needs_more_output_space_(false),
      filtered_read_buffer_len_(0),
      has_handled_response_(false),
      expected_content_size_(-1),
      network_delegate_(network_delegate),
      weak_factory_(this) {
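  // Register as a power observer so that OnSuspend() (defined further down in
  // this file) can cancel the job via Kill() when the system suspends.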
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->AddObserver(this);
}

void URLRequestJob::SetUpload(UploadDataStream* upload) {
}

void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {
}

void URLRequestJob::SetPriority(RequestPriority priority) {
}

void URLRequestJob::Kill() {
  weak_factory_.InvalidateWeakPtrs();
  // Make sure the request is notified that we are done. We assume that the
  // request took care of setting its error status before calling Kill.
  if (request_)
    NotifyCanceled();
}

void URLRequestJob::DetachRequest() {
  request_ = NULL;
}

// This function calls ReadRawData to get stream data. If a filter exists, it
// passes the data to the attached filter and then returns the filter's output
// to the caller.
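// Return contract (as used by the call sites in this file): true with
// *bytes_read > 0 means data was read synchronously, true with
// *bytes_read == 0 signals EOF, and false means the read is pending or an
// error occurred.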
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
  bool rv = false;

  DCHECK_LT(buf_size, 1000000);  // Sanity check.
  DCHECK(buf);
  DCHECK(bytes_read);
  DCHECK(filtered_read_buffer_.get() == NULL);
  DCHECK_EQ(0, filtered_read_buffer_len_);

  *bytes_read = 0;

  // Skip Filter if not present.
  if (!filter_.get()) {
    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
  } else {
    // Save the caller's buffers while we do IO
    // in the filter's buffers.
    filtered_read_buffer_ = buf;
    filtered_read_buffer_len_ = buf_size;

    if (ReadFilteredData(bytes_read)) {
      rv = true;  // We have data to return.

      // It is fine to call DoneReading even if ReadFilteredData receives 0
      // bytes from the net, but we avoid making that call if we know for
      // sure that's the case (ReadRawDataHelper path).
      if (*bytes_read == 0)
        DoneReading();
    } else {
      rv = false;  // Error, or a new IO is pending.
    }
  }

  if (rv && *bytes_read == 0)
    NotifyDone(URLRequestStatus());
  return rv;
}

void URLRequestJob::StopCaching() {
  // Nothing to do here.
}

bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
  // Most job types don't send request headers.
  return false;
}

int64 URLRequestJob::GetTotalReceivedBytes() const {
  return 0;
}

LoadState URLRequestJob::GetLoadState() const {
  return LOAD_STATE_IDLE;
}

UploadProgress URLRequestJob::GetUploadProgress() const {
  return UploadProgress();
}

bool URLRequestJob::GetCharset(std::string* charset) {
  return false;
}

void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {
}

void URLRequestJob::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
  // Only certain request types return more than just request start times.
}

bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
  return false;
}

Filter* URLRequestJob::SetupFilter() const {
  return NULL;
}

bool URLRequestJob::IsRedirectResponse(GURL* location,
                                       int* http_status_code) {
  // For non-HTTP jobs, headers will be null.
  HttpResponseHeaders* headers = request_->response_headers();
  if (!headers)
    return false;

  std::string value;
  if (!headers->IsRedirect(&value))
    return false;

  *location = request_->url().Resolve(value);
  *http_status_code = headers->response_code();
  return true;
}

bool URLRequestJob::CopyFragmentOnRedirect(const GURL& location) const {
  return true;
}

bool URLRequestJob::IsSafeRedirect(const GURL& location) {
  return true;
}

bool URLRequestJob::NeedsAuth() {
  return false;
}

void URLRequestJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* auth_info) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::CancelAuth() {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  // The derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueDespiteLastError() {
  // Implementations should know how to recover from errors they generate.
  // If this code was reached, we are trying to recover from an error that
  // we don't know how to recover from.
  NOTREACHED();
}

void URLRequestJob::FollowDeferredRedirect() {
  DCHECK_NE(-1, deferred_redirect_info_.status_code);

  // NOTE: deferred_redirect_info_ may be invalid, and attempting to follow it
  // will fail inside FollowRedirect. The DCHECK above asserts that we called
  // OnReceivedRedirect.

  // It is also possible that FollowRedirect will drop the last reference to
  // this job, so we need to reset our members before calling it.

  RedirectInfo redirect_info = deferred_redirect_info_;
  deferred_redirect_info_ = RedirectInfo();
  FollowRedirect(redirect_info);
}

void URLRequestJob::ResumeNetworkStart() {
  // This should only be called for HTTP Jobs, and implemented in the derived
  // class.
  NOTREACHED();
}

bool URLRequestJob::GetMimeType(std::string* mime_type) const {
  return false;
}

int URLRequestJob::GetResponseCode() const {
  return -1;
}

HostPortPair URLRequestJob::GetSocketAddress() const {
  return HostPortPair();
}

void URLRequestJob::OnSuspend() {
  Kill();
}

void URLRequestJob::NotifyURLRequestDestroyed() {
}

void URLRequestJob::GetConnectionAttempts(ConnectionAttempts* out) const {
  out->clear();
}

// static
GURL URLRequestJob::ComputeReferrerForRedirect(
    URLRequest::ReferrerPolicy policy,
    const std::string& referrer,
    const GURL& redirect_destination) {
  GURL original_referrer(referrer);
  bool secure_referrer_but_insecure_destination =
      original_referrer.SchemeIsCryptographic() &&
      !redirect_destination.SchemeIsCryptographic();
  bool same_origin =
      original_referrer.GetOrigin() == redirect_destination.GetOrigin();
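  // Each policy below either preserves the referrer, truncates it to its
  // origin, or clears it entirely; none of them substitutes a different value.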
  switch (policy) {
    case URLRequest::CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE:
      return secure_referrer_but_insecure_destination ? GURL()
                                                      : original_referrer;

    case URLRequest::REDUCE_REFERRER_GRANULARITY_ON_TRANSITION_CROSS_ORIGIN:
      if (same_origin) {
        return original_referrer;
      } else if (secure_referrer_but_insecure_destination) {
        return GURL();
      } else {
        return original_referrer.GetOrigin();
      }

    case URLRequest::ORIGIN_ONLY_ON_TRANSITION_CROSS_ORIGIN:
      return same_origin ? original_referrer : original_referrer.GetOrigin();

    case URLRequest::NEVER_CLEAR_REFERRER:
      return original_referrer;
  }

  NOTREACHED();
  return GURL();
}

URLRequestJob::~URLRequestJob() {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->RemoveObserver(this);
}

void URLRequestJob::NotifyCertificateRequested(
    SSLCertRequestInfo* cert_request_info) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifyCertificateRequested(cert_request_info);
}

void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,
                                              bool fatal) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifySSLCertificateError(ssl_info, fatal);
}

bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanGetCookies(cookie_list);
}

bool URLRequestJob::CanSetCookie(const std::string& cookie_line,
                                 CookieOptions* options) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanSetCookie(cookie_line, options);
}

bool URLRequestJob::CanEnablePrivacyMode() const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanEnablePrivacyMode();
}

void URLRequestJob::NotifyBeforeNetworkStart(bool* defer) {
  if (!request_)
    return;

  request_->NotifyBeforeNetworkStart(defer);
}

void URLRequestJob::NotifyHeadersComplete() {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  if (has_handled_response_)
    return;

  // This should not be called on error, and the job type should have cleared
  // IO_PENDING state before calling this method.
  DCHECK(request_->status().is_success());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information. The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = base::Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this'). After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed. self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (request_)
    request_->OnHeadersComplete();
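
  // Classify the response: a redirect is surfaced to the delegate (and
  // possibly followed), an auth challenge waits for SetAuth()/CancelAuth(),
  // and anything else proceeds to filter setup and NotifyResponseStarted().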
  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    // Redirect response bodies are not read. Notify the transaction
    // so it does not treat being stopped as an error.
    DoneReadingRedirectResponse();

    RedirectInfo redirect_info =
        ComputeRedirectInfo(new_location, http_status_code);
    bool defer_redirect = false;
    request_->NotifyReceivedRedirect(redirect_info, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in
    // NotifyReceivedRedirect.
    if (!request_ || !request_->has_delegate())
      return;

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        deferred_redirect_info_ = redirect_info;
      } else {
        FollowRedirect(redirect_info);
      }
      return;
    }
  } else if (NeedsAuth()) {
    scoped_refptr<AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);

    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info.get()) {
      request_->NotifyAuthRequired(auth_info.get());
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  has_handled_response_ = true;
  if (request_->status().is_success())
    filter_.reset(SetupFilter());

  if (!filter_.get()) {
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      base::StringToInt64(content_length, &expected_content_size_);
  } else {
    request_->net_log().AddEvent(
        NetLog::TYPE_URL_REQUEST_FILTERS_SET,
        base::Bind(&FiltersSetCallback, base::Unretained(filter_.get())));
  }

  request_->NotifyResponseStarted();
}

void URLRequestJob::NotifyReadComplete(int bytes_read) {
  // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "475755 URLRequestJob::NotifyReadComplete"));

  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
#if 0
  DCHECK(!request_->status().is_io_pending());
#endif
  // The headers should be complete before reads complete.
  DCHECK(has_handled_response_);

  OnRawReadComplete(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this'). After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed. self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (filter_.get()) {
    // Tell the filter that it has more data.
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read)) {
      if (!filter_bytes_read)
        DoneReading();
      request_->NotifyReadCompleted(filter_bytes_read);
    }
  } else {
    request_->NotifyReadCompleted(bytes_read);
  }
  DVLOG(1) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
}

void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
  DCHECK(!has_handled_response_);
  has_handled_response_ = true;
  if (request_) {
    // There may be relevant information in the response info even in the
    // error case.
    GetResponseInfo(&request_->response_info_);

    request_->set_status(status);
    request_->NotifyResponseStarted();
    // We may have been deleted.
  }
}

void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests. We could receive a request to Cancel, followed shortly
    // by a successful IO. For tracking the status(), once there is
    // an error, we do not change the status back to success. To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success()) {
      if (status.status() == URLRequestStatus::FAILED) {
        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
                                                     status.error());
      }
      request_->set_status(status);
    }

    // If the request succeeded (and wasn't cancelled) and the response code
    // was 4xx or 5xx, record whether or not the main frame was blank. This is
    // intended to be a short-lived histogram, used to figure out how important
    // fixing http://crbug.com/331745 is.
    if (request_->status().is_success()) {
      int response_code = GetResponseCode();
      if (400 <= response_code && response_code <= 599) {
        bool page_has_content = (postfilter_bytes_read_ != 0);
        if (request_->load_flags() & net::LOAD_MAIN_FRAME) {
          UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame",
                                page_has_content);
        } else {
          UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame",
                                page_has_content);
        }
      }
    }
  }

  // Complete this notification later. This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind(&URLRequestJob::CompleteNotifyDone,
                            weak_factory_.GetWeakPtr()));
}

void URLRequestJob::CompleteNotifyDone() {
  // Check if we should notify the delegate that we're done because of an
  // error.
  if (request_ &&
      !request_->status().is_success() &&
      request_->has_delegate()) {
    // We report the error differently depending on whether we've called
    // OnResponseStarted yet.
    if (has_handled_response_) {
      // We signal the error by calling OnReadComplete with a bytes_read of -1.
      request_->NotifyReadCompleted(-1);
    } else {
      has_handled_response_ = true;
      request_->NotifyResponseStarted();
    }
  }
}

void URLRequestJob::NotifyCanceled() {
  if (!done_) {
    NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
  }
}

void URLRequestJob::NotifyRestartRequired() {
  DCHECK(!has_handled_response_);
  if (GetStatus().status() != URLRequestStatus::CANCELED)
    request_->Restart();
}

void URLRequestJob::OnCallToDelegate() {
  request_->OnCallToDelegate();
}

void URLRequestJob::OnCallToDelegateComplete() {
  request_->OnCallToDelegateComplete();
}

bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
                                int *bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;
  return true;
}

void URLRequestJob::DoneReading() {
  // Do nothing.
}

void URLRequestJob::DoneReadingRedirectResponse() {
}

void URLRequestJob::FilteredDataRead(int bytes_read) {
  DCHECK(filter_);
  filter_->FlushStreamBuffer(bytes_read);
}

bool URLRequestJob::ReadFilteredData(int* bytes_read) {
  DCHECK(filter_);
  DCHECK(filtered_read_buffer_.get());
  DCHECK_GT(filtered_read_buffer_len_, 0);
  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // Sanity check.
  DCHECK(!raw_read_buffer_.get());

  *bytes_read = 0;
  bool rv = false;
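
  // Loop until the filter produces output, reports EOF, needs more raw data
  // (IO pending), or fails.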
  for (;;) {
    if (is_done())
      return true;

    if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
      // We don't have any raw data to work with, so read from the transaction.
      int filtered_data_read;
      if (ReadRawDataForFilter(&filtered_data_read)) {
        if (filtered_data_read > 0) {
          // Give data to filter.
          filter_->FlushStreamBuffer(filtered_data_read);
        } else {
          return true;  // EOF.
        }
      } else {
        return false;  // IO Pending (or error).
      }
    }

    if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
        !is_done()) {
      // Get filtered data.
      int filtered_data_len = filtered_read_buffer_len_;
      int output_buffer_size = filtered_data_len;
      Filter::FilterStatus status =
          filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);

      if (filter_needs_more_output_space_ && !filtered_data_len) {
        // filter_needs_more_output_space_ was mistaken... there are no more
        // bytes and we should have at least tried to fill up the filter's
        // input buffer. Correct the state, and try again.
        filter_needs_more_output_space_ = false;
        continue;
      }
      filter_needs_more_output_space_ =
          (filtered_data_len == output_buffer_size);

      switch (status) {
        case Filter::FILTER_DONE: {
          filter_needs_more_output_space_ = false;
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_NEED_MORE_DATA: {
          // We have finished filtering all data currently in the buffer.
          // There might be some space left in the output buffer. One can
          // consider reading more data from the stream to feed the filter
          // and filling up the output buffer. This leads to more complicated
          // buffer management and data notification mechanisms.
          // We can revisit this issue if there is a real perf need.
          if (filtered_data_len > 0) {
            *bytes_read = filtered_data_len;
            postfilter_bytes_read_ += filtered_data_len;
            rv = true;
          } else {
            // Read again since we haven't received enough data yet (e.g., we
            // may not have a complete gzip header yet).
            continue;
          }
          break;
        }
        case Filter::FILTER_OK: {
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_ERROR: {
          DVLOG(1) << __FUNCTION__ << "() "
                   << "\"" << (request_ ? request_->url().spec() : "???")
                   << "\"" << " Filter Error";
          filter_needs_more_output_space_ = false;
          NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                                      ERR_CONTENT_DECODING_FAILED));
          rv = false;
          break;
        }
        default: {
          NOTREACHED();
          filter_needs_more_output_space_ = false;
          rv = false;
          break;
        }
      }

      // If logging all bytes is enabled, log the filtered bytes read.
      if (rv && request() && filtered_data_len > 0 &&
          request()->net_log().IsCapturing()) {
        request()->net_log().AddByteTransferEvent(
            NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len,
            filtered_read_buffer_->data());
      }
    } else {
      // We are done, or there is no data left.
      rv = true;
    }
    break;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to save the
    // caller's buffers. Release our reference.
    filtered_read_buffer_ = NULL;
    filtered_read_buffer_len_ = 0;
  }
  return rv;
}

void URLRequestJob::DestroyFilters() {
  filter_.reset();
}

const URLRequestStatus URLRequestJob::GetStatus() {
  if (request_)
    return request_->status();
  // If the request is gone, we must be cancelled.
  return URLRequestStatus(URLRequestStatus::CANCELED,
                          ERR_ABORTED);
}

void URLRequestJob::SetStatus(const URLRequestStatus &status) {
  if (request_) {
    // An error status should never be replaced by a non-error status by a
    // URLRequestJob. URLRequest has some retry paths, but it resets the status
    // itself, if needed.
    DCHECK(request_->status().is_io_pending() ||
           request_->status().is_success() ||
           (!status.is_success() && !status.is_io_pending()));
    request_->set_status(status);
  }
}

void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
  request_->proxy_server_ = proxy_server;
}

bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
  bool rv = false;

  DCHECK(bytes_read);
  DCHECK(filter_.get());

  *bytes_read = 0;

  // Get more pre-filtered data if needed.
  // TODO(mbelshe): is it possible that the filter needs *MORE* data
  //    when there is some data already in the buffer?
  if (!filter_->stream_data_len() && !is_done()) {
    IOBuffer* stream_buffer = filter_->stream_buffer();
    int stream_buffer_size = filter_->stream_buffer_size();
    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
  }
  return rv;
}

bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
                                      int* bytes_read) {
  DCHECK(!request_->status().is_io_pending());
  DCHECK(raw_read_buffer_.get() == NULL);

  // Keep a pointer to the read buffer, so we have access to it in the
  // OnRawReadComplete() callback in the event that the read completes
  // asynchronously.
  raw_read_buffer_ = buf;
  bool rv = ReadRawData(buf, buf_size, bytes_read);

  if (!request_->status().is_io_pending()) {
    // If the read completes synchronously, either success or failure,
    // invoke the OnRawReadComplete callback so we can account for the
    // completed read.
    OnRawReadComplete(*bytes_read);
  }
  return rv;
}

void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
  int rv = request_->Redirect(redirect_info);
  if (rv != OK)
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}

void URLRequestJob::OnRawReadComplete(int bytes_read) {
  DCHECK(raw_read_buffer_.get());
  // If |filter_| is non-NULL, bytes will be logged after it is applied
  // instead.
  if (!filter_.get() && request() && bytes_read > 0 &&
      request()->net_log().IsCapturing()) {
    request()->net_log().AddByteTransferEvent(
        NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
        bytes_read, raw_read_buffer_->data());
  }

  if (bytes_read > 0) {
    RecordBytesRead(bytes_read);
  }
  raw_read_buffer_ = NULL;
}

void URLRequestJob::RecordBytesRead(int bytes_read) {
  DCHECK_GT(bytes_read, 0);
  prefilter_bytes_read_ += bytes_read;

  // On first read, notify NetworkQualityEstimator that response headers have
  // been received.
  // TODO(tbansal): Move this to url_request_http_job.cc. This may catch
  // Service Worker jobs twice.
  // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the
  // first raw read of the response body. This is used as the signal that
  // response headers have been received.
  if (request_ && request_->context()->network_quality_estimator() &&
      prefilter_bytes_read_ == bytes_read) {
    request_->context()->network_quality_estimator()->NotifyHeadersReceived(
        *request_);
  }

  if (!filter_.get())
    postfilter_bytes_read_ += bytes_read;
  DVLOG(2) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
  if (network_delegate_)
    network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
}

bool URLRequestJob::FilterHasData() {
  return filter_.get() && filter_->stream_data_len();
}

void URLRequestJob::UpdatePacketReadTimes() {
}

RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location,
                                                int http_status_code) {
  const GURL& url = request_->url();

  RedirectInfo redirect_info;

  redirect_info.status_code = http_status_code;

  // The request method may change, depending on the status code.
  redirect_info.new_method =
      ComputeMethodForRedirect(request_->method(), http_status_code);

  // Move the reference fragment of the old location to the new one if the
  // new one has none. This duplicates mozilla's behavior.
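  // For example, a redirect from http://a.test/page#frag to http://b.test/
  // would keep the fragment and produce http://b.test/#frag (hypothetical
  // URLs, for illustration only).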
  if (url.is_valid() && url.has_ref() && !location.has_ref() &&
      CopyFragmentOnRedirect(location)) {
    GURL::Replacements replacements;
    // Reference the |ref| directly out of the original URL to avoid a
    // malloc.
    replacements.SetRef(url.spec().data(),
                        url.parsed_for_possibly_invalid_spec().ref);
    redirect_info.new_url = location.ReplaceComponents(replacements);
  } else {
    redirect_info.new_url = location;
  }

  // Update the first-party URL if appropriate.
  if (request_->first_party_url_policy() ==
          URLRequest::UPDATE_FIRST_PARTY_URL_ON_REDIRECT) {
    redirect_info.new_first_party_for_cookies = redirect_info.new_url;
  } else {
    redirect_info.new_first_party_for_cookies =
        request_->first_party_for_cookies();
  }

  // Alter the referrer if redirecting cross-origin (especially HTTP->HTTPS).
  redirect_info.new_referrer =
      ComputeReferrerForRedirect(request_->referrer_policy(),
                                 request_->referrer(),
                                 redirect_info.new_url).spec();

  return redirect_info;
}

}  // namespace net