// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_job.h"

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/message_loop/message_loop.h"
#include "base/power_monitor/power_monitor.h"
#include "base/profiler/scoped_tracker.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/values.h"
#include "net/base/auth.h"
#include "net/base/host_port_pair.h"
#include "net/base/io_buffer.h"
#include "net/base/load_states.h"
#include "net/base/net_errors.h"
#include "net/base/network_delegate.h"
#include "net/filter/filter.h"
#include "net/http/http_response_headers.h"

namespace {

// Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event.
base::Value* FiltersSetCallback(net::Filter* filter,
                                enum net::NetLog::LogLevel /* log_level */) {
  base::DictionaryValue* event_params = new base::DictionaryValue();
  event_params->SetString("filters", filter->OrderedFilterList());
  return event_params;
}

}  // namespace

namespace net {

URLRequestJob::URLRequestJob(URLRequest* request,
                             NetworkDelegate* network_delegate)
    : request_(request),
      done_(false),
      prefilter_bytes_read_(0),
      postfilter_bytes_read_(0),
      filter_input_byte_count_(0),
      filter_needs_more_output_space_(false),
      filtered_read_buffer_len_(0),
      has_handled_response_(false),
      expected_content_size_(-1),
      network_delegate_(network_delegate),
      weak_factory_(this) {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->AddObserver(this);
}

void URLRequestJob::SetUpload(UploadDataStream* upload) {
}

void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {
}

void URLRequestJob::SetPriority(RequestPriority priority) {
}

void URLRequestJob::Kill() {
  weak_factory_.InvalidateWeakPtrs();
  // Make sure the request is notified that we are done.  We assume that the
  // request took care of setting its error status before calling Kill.
  if (request_)
    NotifyCanceled();
}

void URLRequestJob::DetachRequest() {
  request_ = NULL;
}

// This function calls ReadRawData to get stream data. If a filter exists, it
// passes the data to the attached filter, then returns the filter's output to
// the caller.
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int* bytes_read) {
  bool rv = false;

  DCHECK_LT(buf_size, 1000000);  // Sanity check.
  DCHECK(buf);
  DCHECK(bytes_read);
  DCHECK(filtered_read_buffer_.get() == NULL);
  DCHECK_EQ(0, filtered_read_buffer_len_);

  *bytes_read = 0;

  // Skip Filter if not present.
  if (!filter_.get()) {
    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
  } else {
    // Save the caller's buffers while we do IO
    // in the filter's buffers.
    filtered_read_buffer_ = buf;
    filtered_read_buffer_len_ = buf_size;

    if (ReadFilteredData(bytes_read)) {
      rv = true;  // We have data to return.

      // It is fine to call DoneReading even if ReadFilteredData receives 0
      // bytes from the net, but we avoid making that call if we know for
      // sure that's the case (ReadRawDataHelper path).
      if (*bytes_read == 0)
        DoneReading();
    } else {
      rv = false;  // Error, or a new IO is pending.
    }
  }

  if (rv && *bytes_read == 0)
    NotifyDone(URLRequestStatus());
  return rv;
}
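
// Illustrative sketch of how a caller might drive Read(); the |job|,
// |buffer| and ConsumeData() names below are hypothetical, not actual
// Chromium code:
//
//   scoped_refptr<IOBuffer> buffer(new IOBuffer(4096));
//   int bytes_read = 0;
//   while (job->Read(buffer.get(), 4096, &bytes_read)) {
//     if (bytes_read == 0)
//       break;  // EOF; NotifyDone() has already been called above.
//     ConsumeData(buffer->data(), bytes_read);
//   }
//   // A false return means an error occurred or the read is still pending;
//   // a pending read is reported later through NotifyReadComplete().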

void URLRequestJob::StopCaching() {
  // Nothing to do here.
}

bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
  // Most job types don't send request headers.
  return false;
}

int64 URLRequestJob::GetTotalReceivedBytes() const {
  return 0;
}

LoadState URLRequestJob::GetLoadState() const {
  return LOAD_STATE_IDLE;
}

UploadProgress URLRequestJob::GetUploadProgress() const {
  return UploadProgress();
}

bool URLRequestJob::GetCharset(std::string* charset) {
  return false;
}

void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {
}

void URLRequestJob::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
  // Only certain request types return more than just request start times.
}

bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
  return false;
}

Filter* URLRequestJob::SetupFilter() const {
  return NULL;
}

bool URLRequestJob::IsRedirectResponse(GURL* location,
                                       int* http_status_code) {
  // For non-HTTP jobs, headers will be null.
  HttpResponseHeaders* headers = request_->response_headers();
  if (!headers)
    return false;

  std::string value;
  if (!headers->IsRedirect(&value))
    return false;

  *location = request_->url().Resolve(value);
  *http_status_code = headers->response_code();
  return true;
}
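
// Worked example of the resolution above (values are illustrative only): if
// the request URL is "http://example.com/a/b" and the response is "302 Found"
// with "Location: ../c", Resolve() yields "http://example.com/c" and
// *http_status_code is set to 302.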

bool URLRequestJob::CopyFragmentOnRedirect(const GURL& location) const {
  return true;
}

bool URLRequestJob::IsSafeRedirect(const GURL& location) {
  return true;
}

bool URLRequestJob::NeedsAuth() {
  return false;
}

void URLRequestJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* auth_info) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::CancelAuth() {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  // The derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueDespiteLastError() {
  // Implementations should know how to recover from errors they generate.
  // If this code was reached, we are trying to recover from an error that
  // we don't know how to recover from.
  NOTREACHED();
}

void URLRequestJob::FollowDeferredRedirect() {
  DCHECK_NE(-1, deferred_redirect_info_.status_code);

  // NOTE: deferred_redirect_info_ may be invalid, and attempting to follow it
  // will fail inside FollowRedirect.  The DCHECK above asserts that we called
  // OnReceivedRedirect.

  // It is also possible that FollowRedirect will drop the last reference to
  // this job, so we need to reset our members before calling it.

  RedirectInfo redirect_info = deferred_redirect_info_;
  deferred_redirect_info_ = RedirectInfo();
  FollowRedirect(redirect_info);
}
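
// Illustrative flow for the deferral handled above (the delegate code shown
// is hypothetical): when the delegate defers in NotifyReceivedRedirect(),
// NotifyHeadersComplete() stashes the redirect in deferred_redirect_info_ and
// returns; the embedder later resumes it, roughly:
//
//   // In a URLRequest::Delegate::OnReceivedRedirect() override:
//   *defer_redirect = true;             // pause the redirect
//   ...
//   request->FollowDeferredRedirect();  // eventually reaches this method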

void URLRequestJob::ResumeNetworkStart() {
  // This should only be called for HTTP Jobs, and implemented in the derived
  // class.
  NOTREACHED();
}

bool URLRequestJob::GetMimeType(std::string* mime_type) const {
  return false;
}

int URLRequestJob::GetResponseCode() const {
  return -1;
}

HostPortPair URLRequestJob::GetSocketAddress() const {
  return HostPortPair();
}

void URLRequestJob::OnSuspend() {
  Kill();
}

void URLRequestJob::NotifyURLRequestDestroyed() {
}

// static
GURL URLRequestJob::ComputeReferrerForRedirect(
    URLRequest::ReferrerPolicy policy,
    const std::string& referrer,
    const GURL& redirect_destination) {
  GURL original_referrer(referrer);
  bool secure_referrer_but_insecure_destination =
      original_referrer.SchemeIsSecure() &&
      !redirect_destination.SchemeIsSecure();
  bool same_origin =
      original_referrer.GetOrigin() == redirect_destination.GetOrigin();
  switch (policy) {
    case URLRequest::CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE:
      return secure_referrer_but_insecure_destination ? GURL()
                                                      : original_referrer;

    case URLRequest::REDUCE_REFERRER_GRANULARITY_ON_TRANSITION_CROSS_ORIGIN:
      if (same_origin) {
        return original_referrer;
      } else if (secure_referrer_but_insecure_destination) {
        return GURL();
      } else {
        return original_referrer.GetOrigin();
      }

    case URLRequest::ORIGIN_ONLY_ON_TRANSITION_CROSS_ORIGIN:
      return same_origin ? original_referrer : original_referrer.GetOrigin();

    case URLRequest::NEVER_CLEAR_REFERRER:
      return original_referrer;
  }

  NOTREACHED();
  return GURL();
}
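
// Worked examples for the switch above (illustrative values only), with
// referrer "https://site.test/page" redirecting to "http://other.test/":
//   CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE   -> empty GURL()
//   REDUCE_REFERRER_GRANULARITY_ON_TRANSITION_CROSS_ORIGIN -> empty GURL()
//   ORIGIN_ONLY_ON_TRANSITION_CROSS_ORIGIN  -> "https://site.test/"
//   NEVER_CLEAR_REFERRER                    -> "https://site.test/page"
// For a same-origin, same-scheme redirect, every policy returns the original
// referrer unchanged.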

URLRequestJob::~URLRequestJob() {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->RemoveObserver(this);
}

void URLRequestJob::NotifyCertificateRequested(
    SSLCertRequestInfo* cert_request_info) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifyCertificateRequested(cert_request_info);
}

void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,
                                              bool fatal) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifySSLCertificateError(ssl_info, fatal);
}

bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanGetCookies(cookie_list);
}

bool URLRequestJob::CanSetCookie(const std::string& cookie_line,
                                 CookieOptions* options) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanSetCookie(cookie_line, options);
}

bool URLRequestJob::CanEnablePrivacyMode() const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanEnablePrivacyMode();
}

CookieStore* URLRequestJob::GetCookieStore() const {
  DCHECK(request_);

  return request_->cookie_store();
}

void URLRequestJob::NotifyBeforeNetworkStart(bool* defer) {
  if (!request_)
    return;

  request_->NotifyBeforeNetworkStart(defer);
}

void URLRequestJob::NotifyHeadersComplete() {
  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::NotifyHeadersComplete"));

  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  if (has_handled_response_)
    return;

  DCHECK(!request_->status().is_io_pending());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information.  The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = base::Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (request_) {
    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile1(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 1"));

    request_->OnHeadersComplete();
  }

  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile2(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::NotifyHeadersComplete 2"));

  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile3(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 3"));

    // Redirect response bodies are not read. Notify the transaction
    // so it does not treat being stopped as an error.
    DoneReadingRedirectResponse();

    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile4(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 4"));

    RedirectInfo redirect_info =
        ComputeRedirectInfo(new_location, http_status_code);

    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile5(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 5"));

    bool defer_redirect = false;
    request_->NotifyReceivedRedirect(redirect_info, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in
    // NotifyReceivedRedirect.
    if (!request_ || !request_->has_delegate())
      return;

    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile6(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 6"));

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        deferred_redirect_info_ = redirect_info;
      } else {
        FollowRedirect(redirect_info);
      }
      return;
    }
  } else if (NeedsAuth()) {
    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile7(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 7"));

    scoped_refptr<AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);

    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile8(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 8"));

    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info.get()) {
      request_->NotifyAuthRequired(auth_info.get());
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile9(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::NotifyHeadersComplete 9"));

  has_handled_response_ = true;
  if (request_->status().is_success())
    filter_.reset(SetupFilter());

  if (!filter_.get()) {
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      base::StringToInt64(content_length, &expected_content_size_);
  } else {
    request_->net_log().AddEvent(
        NetLog::TYPE_URL_REQUEST_FILTERS_SET,
        base::Bind(&FiltersSetCallback, base::Unretained(filter_.get())));
  }

  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile10(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::NotifyHeadersComplete 10"));

  request_->NotifyResponseStarted();
}

void URLRequestJob::NotifyReadComplete(int bytes_read) {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
#if 0
  DCHECK(!request_->status().is_io_pending());
#endif
  // The headers should be complete before reads complete.
  DCHECK(has_handled_response_);

  OnRawReadComplete(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (filter_.get()) {
    // Tell the filter that it has more data.
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read)) {
      if (!filter_bytes_read)
        DoneReading();
      request_->NotifyReadCompleted(filter_bytes_read);
    }
  } else {
    request_->NotifyReadCompleted(bytes_read);
  }
  DVLOG(1) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
}

void URLRequestJob::NotifyStartError(const URLRequestStatus& status) {
  DCHECK(!has_handled_response_);
  has_handled_response_ = true;
  if (request_) {
    // There may be relevant information in the response info even in the
    // error case.
    GetResponseInfo(&request_->response_info_);

    request_->set_status(status);
    request_->NotifyResponseStarted();
    // We may have been deleted.
  }
}

void URLRequestJob::NotifyDone(const URLRequestStatus& status) {
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests.  We could receive a request to Cancel, followed shortly
    // by a successful IO.  For tracking the status(), once there is
    // an error, we do not change the status back to success.  To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success()) {
      if (status.status() == URLRequestStatus::FAILED) {
        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
                                                     status.error());
      }
      request_->set_status(status);
    }
  }

  // Complete this notification later.  This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestJob::CompleteNotifyDone,
                 weak_factory_.GetWeakPtr()));
}
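
// Note on the NotifyDone() / CompleteNotifyDone() handoff: the completion is
// posted as a task so that a job failing synchronously (for example inside
// Start()) does not re-enter the URLRequest delegate on the caller's stack.
// The weak pointer ensures the posted task is dropped if the job is killed
// or destroyed before the message loop runs it.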

void URLRequestJob::CompleteNotifyDone() {
  // Check if we should notify the delegate that we're done because of an
  // error.
  if (request_ &&
      !request_->status().is_success() &&
      request_->has_delegate()) {
    // We report the error differently depending on whether we've called
    // OnResponseStarted yet.
    if (has_handled_response_) {
      // We signal the error by calling OnReadComplete with a bytes_read of -1.
      request_->NotifyReadCompleted(-1);
    } else {
      has_handled_response_ = true;
      request_->NotifyResponseStarted();
    }
  }
}

void URLRequestJob::NotifyCanceled() {
  if (!done_) {
    NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
  }
}

void URLRequestJob::NotifyRestartRequired() {
  DCHECK(!has_handled_response_);
  if (GetStatus().status() != URLRequestStatus::CANCELED)
    request_->Restart();
}

void URLRequestJob::OnCallToDelegate() {
  request_->OnCallToDelegate();
}

void URLRequestJob::OnCallToDelegateComplete() {
  request_->OnCallToDelegateComplete();
}

bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
                                int* bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;
  return true;
}

void URLRequestJob::DoneReading() {
  // Do nothing.
}

void URLRequestJob::DoneReadingRedirectResponse() {
}

void URLRequestJob::FilteredDataRead(int bytes_read) {
  DCHECK(filter_);
  filter_->FlushStreamBuffer(bytes_read);
}

bool URLRequestJob::ReadFilteredData(int* bytes_read) {
  DCHECK(filter_);
  DCHECK(filtered_read_buffer_.get());
  DCHECK_GT(filtered_read_buffer_len_, 0);
  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // Sanity check.
  DCHECK(!raw_read_buffer_.get());

  *bytes_read = 0;
  bool rv = false;

  for (;;) {
    if (is_done())
      return true;

    if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
      // We don't have any raw data to work with, so read from the transaction.
      int filtered_data_read;
      if (ReadRawDataForFilter(&filtered_data_read)) {
        if (filtered_data_read > 0) {
          // Give data to filter.
          filter_->FlushStreamBuffer(filtered_data_read);
        } else {
          return true;  // EOF.
        }
      } else {
        return false;  // IO Pending (or error).
      }
    }

    if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
        !is_done()) {
      // Get filtered data.
      int filtered_data_len = filtered_read_buffer_len_;
      int output_buffer_size = filtered_data_len;
      Filter::FilterStatus status =
          filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);

      if (filter_needs_more_output_space_ && !filtered_data_len) {
        // filter_needs_more_output_space_ was mistaken... there are no more
        // bytes and we should have at least tried to fill up the filter's
        // input buffer. Correct the state, and try again.
        filter_needs_more_output_space_ = false;
        continue;
      }
      filter_needs_more_output_space_ =
          (filtered_data_len == output_buffer_size);

      switch (status) {
        case Filter::FILTER_DONE: {
          filter_needs_more_output_space_ = false;
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_NEED_MORE_DATA: {
          // We have finished filtering all data currently in the buffer.
          // There might be some space left in the output buffer. One can
          // consider reading more data from the stream to feed the filter
          // and filling up the output buffer. This leads to more complicated
          // buffer management and data notification mechanisms.
          // We can revisit this issue if there is a real perf need.
          if (filtered_data_len > 0) {
            *bytes_read = filtered_data_len;
            postfilter_bytes_read_ += filtered_data_len;
            rv = true;
          } else {
            // Read again since we haven't received enough data yet (e.g., we
            // may not have a complete gzip header yet).
            continue;
          }
          break;
        }
        case Filter::FILTER_OK: {
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_ERROR: {
          DVLOG(1) << __FUNCTION__ << "() "
                   << "\"" << (request_ ? request_->url().spec() : "???")
                   << "\"" << " Filter Error";
          filter_needs_more_output_space_ = false;
          NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                                      ERR_CONTENT_DECODING_FAILED));
          rv = false;
          break;
        }
        default: {
          NOTREACHED();
          filter_needs_more_output_space_ = false;
          rv = false;
          break;
        }
      }

      // If logging all bytes is enabled, log the filtered bytes read.
      if (rv && request() && request()->net_log().IsLoggingBytes() &&
          filtered_data_len > 0) {
        request()->net_log().AddByteTransferEvent(
            NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ,
            filtered_data_len, filtered_read_buffer_->data());
      }
    } else {
      // We are done, or there is no data left.
      rv = true;
    }
    break;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to save the
    // caller's buffers. Release our reference.
    filtered_read_buffer_ = NULL;
    filtered_read_buffer_len_ = 0;
  }
  return rv;
}
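
// Summary of the status handling above, for readers of the loop:
//   FILTER_OK / FILTER_DONE -> hand |filtered_data_len| bytes back.
//   FILTER_NEED_MORE_DATA   -> return whatever was produced, or loop back to
//                              pull more raw bytes if nothing was.
//   FILTER_ERROR            -> fail the request with
//                              ERR_CONTENT_DECODING_FAILED.
// A false return therefore means either a pending raw read or a filter
// failure; a true return with zero bytes means the stream is exhausted.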

void URLRequestJob::DestroyFilters() {
  filter_.reset();
}

const URLRequestStatus URLRequestJob::GetStatus() {
  if (request_)
    return request_->status();
  // If the request is gone, we must be cancelled.
  return URLRequestStatus(URLRequestStatus::CANCELED,
                          ERR_ABORTED);
}

void URLRequestJob::SetStatus(const URLRequestStatus& status) {
  if (request_)
    request_->set_status(status);
}

void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
  request_->proxy_server_ = proxy_server;
}

bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
  bool rv = false;

  DCHECK(bytes_read);
  DCHECK(filter_.get());

  *bytes_read = 0;

  // Get more pre-filtered data if needed.
  // TODO(mbelshe): is it possible that the filter needs *MORE* data
  //    when there is some data already in the buffer?
  if (!filter_->stream_data_len() && !is_done()) {
    IOBuffer* stream_buffer = filter_->stream_buffer();
    int stream_buffer_size = filter_->stream_buffer_size();
    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
  }
  return rv;
}

bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
                                      int* bytes_read) {
  DCHECK(!request_->status().is_io_pending());
  DCHECK(raw_read_buffer_.get() == NULL);

  // Keep a pointer to the read buffer, so we have access to it in the
  // OnRawReadComplete() callback in the event that the read completes
  // asynchronously.
  raw_read_buffer_ = buf;
  bool rv = ReadRawData(buf, buf_size, bytes_read);

  if (!request_->status().is_io_pending()) {
    // If the read completes synchronously, either success or failure,
    // invoke the OnRawReadComplete callback so we can account for the
    // completed read.
    OnRawReadComplete(*bytes_read);
  }
  return rv;
}
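
// The helper above encodes the synchronous/asynchronous read contract: when
// ReadRawData() finishes inline, OnRawReadComplete() is invoked here so the
// bytes are accounted for immediately; when the subclass leaves the request
// in the io_pending state, accounting is deferred until the subclass later
// reports the result through NotifyReadComplete().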

void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
  int rv = request_->Redirect(redirect_info);
  if (rv != OK)
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}

void URLRequestJob::OnRawReadComplete(int bytes_read) {
  DCHECK(raw_read_buffer_.get());
  // If |filter_| is non-NULL, bytes will be logged after it is applied
  // instead.
  if (!filter_.get() && request() && request()->net_log().IsLoggingBytes() &&
      bytes_read > 0) {
    request()->net_log().AddByteTransferEvent(
        NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
        bytes_read, raw_read_buffer_->data());
  }

  if (bytes_read > 0) {
    RecordBytesRead(bytes_read);
  }
  raw_read_buffer_ = NULL;
}

void URLRequestJob::RecordBytesRead(int bytes_read) {
  filter_input_byte_count_ += bytes_read;
  prefilter_bytes_read_ += bytes_read;
  if (!filter_.get())
    postfilter_bytes_read_ += bytes_read;
  DVLOG(2) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
  if (network_delegate_)
    network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
}
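
// Accounting note: prefilter_bytes_read_ counts every raw byte delivered by
// the subclass, while postfilter_bytes_read_ is only advanced here when no
// filter is installed; with a filter, ReadFilteredData() advances it as
// decoded bytes are handed to the caller.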

bool URLRequestJob::FilterHasData() {
  return filter_.get() && filter_->stream_data_len();
}

void URLRequestJob::UpdatePacketReadTimes() {
}

RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location,
                                                int http_status_code) {
  const GURL& url = request_->url();

  RedirectInfo redirect_info;

  redirect_info.status_code = http_status_code;

  // The request method may change, depending on the status code.
  redirect_info.new_method = URLRequest::ComputeMethodForRedirect(
      request_->method(), http_status_code);

  // Move the reference fragment of the old location to the new one if the
  // new one has none. This duplicates mozilla's behavior.
  if (url.is_valid() && url.has_ref() && !location.has_ref() &&
      CopyFragmentOnRedirect(location)) {
    GURL::Replacements replacements;
    // Reference the |ref| directly out of the original URL to avoid a
    // malloc.
    replacements.SetRef(url.spec().data(),
                        url.parsed_for_possibly_invalid_spec().ref);
    redirect_info.new_url = location.ReplaceComponents(replacements);
  } else {
    redirect_info.new_url = location;
  }

  // Update the first-party URL if appropriate.
  if (request_->first_party_url_policy() ==
          URLRequest::UPDATE_FIRST_PARTY_URL_ON_REDIRECT) {
    redirect_info.new_first_party_for_cookies = redirect_info.new_url;
  } else {
    redirect_info.new_first_party_for_cookies =
        request_->first_party_for_cookies();
  }

  // Alter the referrer if redirecting cross-origin (especially HTTP->HTTPS).
  redirect_info.new_referrer =
      ComputeReferrerForRedirect(request_->referrer_policy(),
                                 request_->referrer(),
                                 redirect_info.new_url).spec();

  return redirect_info;
}
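
// Worked example for the fragment-copying branch above (values are
// illustrative): redirecting "http://a.test/page#section" via
// "Location: http://b.test/dest" yields new_url "http://b.test/dest#section",
// because the destination has no fragment of its own and
// CopyFragmentOnRedirect() defaults to true. new_method and new_referrer are
// then derived from the status code and the referrer policy as shown.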

}  // namespace net