// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_http_job.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/file_util.h"
#include "base/file_version_info.h"
#include "base/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/string_util.h"
#include "base/time.h"
#include "net/base/cert_status_flags.h"
#include "net/base/filter.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/mime_util.h"
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/base/network_delegate.h"
#include "net/base/sdch_manager.h"
#include "net/base/ssl_cert_request_info.h"
#include "net/base/ssl_config_service.h"
#include "net/cookies/cookie_monster.h"
#include "net/http/http_network_session.h"
#include "net/http/http_request_headers.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/http/http_status_code.h"
#include "net/http/http_transaction.h"
#include "net/http/http_transaction_delegate.h"
#include "net/http/http_transaction_factory.h"
#include "net/http/http_util.h"
#include "net/url_request/fraudulent_certificate_reporter.h"
#include "net/url_request/http_user_agent_settings.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_error_job.h"
#include "net/url_request/url_request_redirect_job.h"
#include "net/url_request/url_request_throttler_header_adapter.h"
#include "net/url_request/url_request_throttler_manager.h"

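// Request header used to advertise the SDCH dictionaries already available on
// the client, so the server can encode the response against one of them.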
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";

namespace net {

class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  virtual ~HttpFilterContext();

  // FilterContext implementation.
  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
  virtual bool GetURL(GURL* gurl) const OVERRIDE;
  virtual base::Time GetRequestTime() const OVERRIDE;
  virtual bool IsCachedContent() const OVERRIDE;
  virtual bool IsDownload() const OVERRIDE;
  virtual bool IsSdchResponse() const OVERRIDE;
  virtual int64 GetByteReadCount() const OVERRIDE;
  virtual int GetResponseCode() const OVERRIDE;
  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;

  // Method to allow us to reset filter context for a response that should have
  // been SDCH encoded when there is an update due to an explicit HTTP header.
  void ResetSdchResponseToFalse();

 private:
  URLRequestHttpJob* job_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};

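// Forwards cache/network wait-state transitions from the HTTP transaction to
// the NetworkDelegate, so the delegate can observe whether a request is
// waiting on the cache or on the network. All notifications stop once the
// request is detached.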
class URLRequestHttpJob::HttpTransactionDelegateImpl
    : public HttpTransactionDelegate {
 public:
  explicit HttpTransactionDelegateImpl(URLRequest* request)
      : request_(request),
        network_delegate_(request->context()->network_delegate()),
        cache_active_(false),
        network_active_(false) {
  }
  virtual ~HttpTransactionDelegateImpl() {
    OnDetachRequest();
  }
  void OnDetachRequest() {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_RESET);
    cache_active_ = false;
    network_active_ = false;
    request_ = NULL;
  }
  virtual void OnCacheActionStart() OVERRIDE {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    DCHECK(!cache_active_ && !network_active_);
    cache_active_ = true;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START);
  }
  virtual void OnCacheActionFinish() OVERRIDE {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    DCHECK(cache_active_ && !network_active_);
    cache_active_ = false;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH);
  }
  virtual void OnNetworkActionStart() OVERRIDE {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    DCHECK(!cache_active_ && !network_active_);
    network_active_ = true;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START);
  }
  virtual void OnNetworkActionFinish() OVERRIDE {
    if (request_ == NULL || network_delegate_ == NULL)
      return;
    DCHECK(!cache_active_ && network_active_);
    network_active_ = false;
    network_delegate_->NotifyRequestWaitStateChange(
        *request_,
        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH);
  }

 private:
  URLRequest* request_;
  NetworkDelegate* network_delegate_;
  bool cache_active_;
  bool network_active_;
};

URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}

URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
}

bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}

bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
  if (!job_->request())
    return false;
  *gurl = job_->request()->url();
  return true;
}

base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
  return job_->request() ? job_->request()->request_time() : base::Time();
}

bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}

bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
}

void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
  DCHECK(job_->sdch_dictionary_advertised_);
  job_->sdch_dictionary_advertised_ = false;
}

bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
  return job_->sdch_dictionary_advertised_;
}

int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}

int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}

void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}

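// Creates the job for an http/https request. Without a valid transaction
// factory on the context this degrades to an error job; if HSTS requires an
// upgrade to https, a redirect job is returned instead of a network fetch.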
// TODO(darin): make sure the port blocking code is not lost
// static
URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  DCHECK(scheme == "http" || scheme == "https");

  if (!request->context()->http_transaction_factory()) {
    NOTREACHED() << "requires a valid context";
    return new URLRequestErrorJob(
        request, network_delegate, ERR_INVALID_ARGUMENT);
  }

  GURL redirect_url;
  if (request->GetHSTSRedirect(&redirect_url)) {
    return new URLRequestRedirectJob(
        request, network_delegate, redirect_url,
        // Use status code 307 to preserve the method, so POST requests work.
        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
  }
  return new URLRequestHttpJob(request,
                               network_delegate,
                               request->context()->http_user_agent_settings());
}

URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      ALLOW_THIS_IN_INITIALIZER_LIST(start_callback_(
          base::Bind(&URLRequestHttpJob::OnStartCompleted,
                     base::Unretained(this)))),
      ALLOW_THIS_IN_INITIALIZER_LIST(notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this)))),
      read_in_progress_(false),
      transaction_(NULL),
      throttling_entry_(NULL),
      sdch_dictionary_advertised_(false),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          filter_context_(new HttpFilterContext(this))),
      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
      ALLOW_THIS_IN_INITIALIZER_LIST(on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this)))),
      awaiting_callback_(false),
      http_transaction_delegate_(new HttpTransactionDelegateImpl(request)),
      http_user_agent_settings_(http_user_agent_settings) {
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}

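// Called each time response headers become available (the transaction may be
// restarted for authentication): caches the response info, feeds the
// throttler, processes security headers, notes any advertised SDCH
// dictionary, and either restarts for auth or propagates the notification.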
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  if (!is_cached_content_ && throttling_entry_) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  if (SdchManager::Global() &&
      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
    const std::string name = "Get-Dictionary";
    std::string url_text;
    void* iter = NULL;
    // TODO(jar): We need to not fetch dictionaries the first time they are
    // seen, but rather wait until we can justify their usefulness.
    // For now, we will only fetch the first dictionary, which will at least
    // require multiple suggestions before we get additional ones for this site.
    // Eventually we should wait until a dictionary is requested several times
    // before we even download it (so that we don't waste memory or bandwidth).
    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
      // request_->url() won't be valid in the destructor, so we use an
      // alternate copy.
      DCHECK_EQ(request_->url(), request_info_.url);
      // Resolve suggested URL relative to request url.
      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}

void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}

void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  response_info_ = NULL;
}

void URLRequestHttpJob::StartTransaction() {
  if (request_->context()->network_delegate()) {
    int rv = request_->context()->network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // MaybeStartTransactionInternal().
    if (rv == ERR_IO_PENDING) {
      SetBlockedOnDelegate();
      return;
    }
    MaybeStartTransactionInternal(rv);
    return;
  }
  StartTransactionInternal();
}

void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  SetUnblockedOnDelegate();

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}

void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
  if (result == OK) {
    StartTransactionInternal();
  } else {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyCanceled();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (request_->context()->network_delegate()) {
    request_->context()->network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        &transaction_, http_transaction_delegate_.get());
    if (rv == OK) {
      if (!throttling_entry_ ||
          !throttling_entry_->ShouldRejectRequest(*request_)) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::AddExtraHeaders() {
  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    bool advertise_sdch = SdchManager::Global() &&
        SdchManager::Global()->IsInSupportedDomain(request_->url());
    std::string avail_dictionaries;
    if (advertise_sdch) {
      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
                                                    &avail_dictionaries);

      // The AllowLatencyExperiment() is only true if we've successfully done a
      // full SDCH compression recently in this browser session for this host.
      // Note that for this path, there might be no applicable dictionaries,
      // and hence we can't participate in the experiment.
      if (!avail_dictionaries.empty() &&
          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
        // We are participating in the test (or control), and hence we'll
        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
        packet_timing_enabled_ = true;
        if (base::RandDouble() < .01) {
          sdch_test_control_ = true;  // 1% probability.
          advertise_sdch = false;
        } else {
          sdch_test_activated_ = true;
        }
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
      if (!avail_dictionaries.empty()) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            avail_dictionaries);
        sdch_dictionary_advertised_ = true;
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language and Accept-Charset if the request
    // didn't have them specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
    std::string accept_charset = http_user_agent_settings_->GetAcceptCharset();
    if (!accept_charset.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptCharset,
          accept_charset);
    }
  }
}

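// Cookie handling runs asynchronously before the transaction starts:
// AddCookieHeaderAndStart() fetches the cookie list, CheckCookiePolicyAndLoad()
// consults the delegate, DoLoadCookies() reads the matching cookies, and
// OnCookiesLoaded() attaches the Cookie header before DoStartTransaction().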
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = request_->context()->cookie_store();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
    if (cookie_monster) {
      cookie_monster->GetAllCookiesForURLAsync(
          request_->url(),
          base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                     weak_factory_.GetWeakPtr()));
    } else {
      CheckCookiePolicyAndLoad(CookieList());
    }
  } else {
    DoStartTransaction();
  }
}

void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  request_->context()->cookie_store()->GetCookiesWithInfoAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}

void URLRequestHttpJob::CheckCookiePolicyAndLoad(
    const CookieList& cookie_list) {
  if (CanGetCookies(cookie_list))
    DoLoadCookies();
  else
    DoStartTransaction();
}

void URLRequestHttpJob::OnCookiesLoaded(
    const std::string& cookie_line,
    const std::vector<net::CookieStore::CookieInfo>& cookie_infos) {
  if (!cookie_line.empty()) {
    request_info_.extra_headers.SetHeader(
        HttpRequestHeaders::kCookie, cookie_line);
  }
  DoStartTransaction();
}

void URLRequestHttpJob::DoStartTransaction() {
  // We may have been canceled while retrieving cookies.
  if (GetStatus().is_success()) {
    StartTransaction();
  } else {
    NotifyCanceled();
  }
}

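// Runs once headers have been received (and, if a delegate rewrote them, once
// its OnHeadersReceived callback completes): persists any Set-Cookie values
// from the response before signaling that headers are complete.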
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}

// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      request_->context()->cookie_store() &&
      response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        request_->context()->cookie_store()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}

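// Illustrative trace of the two shared booleans (a restatement, not code).
// Synchronous save: SaveNextCookie sets callback_pending, the store runs
// OnCookieSaved inline, which sees save_next_cookie_running still true,
// clears callback_pending, and returns; the while loop then advances.
// Asynchronous save: SaveNextCookie returns with save_next_cookie_running
// set to false; when the store later runs OnCookieSaved, it observes this
// and calls SaveNextCookie itself to resume iteration.
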
// |save_next_cookie_running| is true while SaveNextCookie is on the stack and
// set to false when it exits, allowing the callback to determine whether the
// save occurred synchronously or asynchronously.
// |callback_pending| is set to true before each save is dispatched and set to
// false by the callback, allowing SaveNextCookie to detect whether the save
// completed synchronously.
// See SaveNextCookie() for more information.
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  callback_pending->data = false;

  // If we were called synchronously, return.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}

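// Collects every non-empty Set-Cookie header value from the response into
// |cookies|.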
void URLRequestHttpJob::FetchResponseCookies(
    std::vector<std::string>* cookies) {
  const std::string name = "Set-Cookie";
  std::string value;

  void* iter = NULL;
  HttpResponseHeaders* headers = GetResponseHeaders();
  while (headers->EnumerateHeader(&iter, name, &value)) {
    if (!value.empty())
      cookies->push_back(value);
  }
}

// NOTE: |ProcessStrictTransportSecurityHeader| and
// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HSTS headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
  //
  //   If a UA receives more than one STS header field in a HTTP response
  //   message over secure transport, then the UA MUST process only the
  //   first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
    security_state->AddHSTSHeader(request_info_.url.host(), value);
}

void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HPKP headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
  //
  //   If a UA receives more than one PKP header field in an HTTP
  //   response message over secure transport, then the UA MUST process
  //   only the first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
    security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
}

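// Completion callback for Start/RestartWith* on the transaction. Depending on
// |result| this reports pinning failures, lets the network delegate inspect
// (and possibly rewrite or block) the received headers, surfaces certificate
// errors and client-certificate requests, or fails the request.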
void URLRequestHttpJob::OnStartCompleted(int result) {
  RecordTimer();

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  // If the transaction was destroyed, then the job was cancelled, and
  // we can just ignore this notification.
  if (!transaction_.get())
    return;

  // Clear the IO_PENDING status
  SetStatus(URLRequestStatus());

  const URLRequestContext* context = request_->context();

  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
      transaction_->GetResponseInfo() != NULL) {
    FraudulentCertificateReporter* reporter =
        context->fraudulent_certificate_reporter();
    if (reporter != NULL) {
      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
      bool sni_available = SSLConfigService::IsSNIAvailable(
          context->ssl_config_service());
      const std::string& host = request_->url().host();

      reporter->SendReport(host, ssl_info, sni_available);
    }
  }

  if (result == OK) {
    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
    if (context->network_delegate()) {
      // Note that |this| may not be deleted until
      // |on_headers_received_callback_| or
      // |NetworkDelegate::URLRequestDestroyed()| has been called.
      int error = context->network_delegate()->
          NotifyHeadersReceived(request_, on_headers_received_callback_,
                                headers, &override_response_headers_);
      if (error != net::OK) {
        if (error == net::ERR_IO_PENDING) {
          awaiting_callback_ = true;
          SetBlockedOnDelegate();
        } else {
          std::string source("delegate");
          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                       NetLog::StringCallback("source",
                                                              &source));
          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
        }
        return;
      }
    }

    SaveCookiesAndNotifyHeadersComplete(net::OK);
  } else if (IsCertificateError(result)) {
    // We encountered an SSL certificate error. Ask our delegate to decide
    // what we should do.

    TransportSecurityState::DomainState domain_state;
    const URLRequestContext* context = request_->context();
    const bool fatal = context->transport_security_state() &&
        context->transport_security_state()->GetDomainState(
            request_info_.url.host(),
            SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
            &domain_state) &&
        domain_state.ShouldSSLErrorsBeFatal();
    NotifySSLCertificateError(transaction_->GetResponseInfo()->ssl_info, fatal);
  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
    NotifyCertificateRequested(
        transaction_->GetResponseInfo()->cert_request_info);
  } else {
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}

void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  SetUnblockedOnDelegate();
  awaiting_callback_ = false;

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}

void URLRequestHttpJob::OnReadCompleted(int result) {
  read_in_progress_ = false;

  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Clear the IO_PENDING status
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}

void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}

void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}

void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // Ensure that we do not send username and password fields in the referrer.
  GURL referrer(request_->GetSanitizedReferrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.priority = request_->priority();
  request_info_.request_id = request_->identifier();

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent(request_->url()) :
          EmptyString());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}

void URLRequestHttpJob::Kill() {
  http_transaction_delegate_->OnDetachRequest();

  if (!transaction_.get())
    return;

  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}

LoadState URLRequestHttpJob::GetLoadState() const {
  return transaction_.get() ?
      transaction_->GetLoadState() : LOAD_STATE_IDLE;
}

UploadProgress URLRequestHttpJob::GetUploadProgress() const {
  return transaction_.get() ?
      transaction_->GetUploadProgress() : UploadProgress();
}

bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetMimeType(mime_type);
}

bool URLRequestHttpJob::GetCharset(std::string* charset) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  return GetResponseHeaders()->GetCharset(charset);
}

void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
  DCHECK(request_);
  DCHECK(transaction_.get());

  if (response_info_) {
    *info = *response_info_;
    if (override_response_headers_)
      info->headers = override_response_headers_;
  }
}

void URLRequestHttpJob::GetLoadTimingInfo(
    LoadTimingInfo* load_timing_info) const {
  if (transaction_)
    transaction_->GetLoadTimingInfo(load_timing_info);
}

bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again? Perhaps we
  // should just leverage response_cookies_.

  cookies->clear();
  FetchResponseCookies(cookies);
  return true;
}

int URLRequestHttpJob::GetResponseCode() const {
  DCHECK(transaction_.get());

  if (!response_info_)
    return -1;

  return GetResponseHeaders()->response_code();
}

Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  if (filter_context_->IsSdchResponse()) {
    // We are wary of proxies that discard or damage SDCH encoding. If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    iter = NULL;
    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
                                    &sdch_response_status)) {
      if (sdch_response_status == "0") {
        filter_context_->ResetSdchResponseToFalse();
        break;
      }
    }
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
}

bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
  // We only allow redirects to certain "safe" protocols. This does not
  // restrict redirects to externally handled protocols. Our consumer would
  // need to take care of those.

  if (!URLRequest::IsHandledURL(location))
    return true;

  static const char* kSafeSchemes[] = {
    "http",
    "https",
    "ftp"
  };

  for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
    if (location.SchemeIs(kSafeSchemes[i]))
      return true;
  }

  return false;
}

bool URLRequestHttpJob::NeedsAuth() {
  int code = GetResponseCode();
  if (code == -1)
    return false;

  // Check if we need either Proxy or WWW Authentication. This could happen
  // because we either provided no auth info, or provided incorrect info.
  switch (code) {
    case 407:
      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
    case 401:
      if (server_auth_state_ == AUTH_STATE_CANCELED)
        return false;
      server_auth_state_ = AUTH_STATE_NEED_AUTH;
      return true;
  }
  return false;
}

void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
         (GetResponseHeaders()->response_code() ==
          HTTP_PROXY_AUTHENTICATION_REQUIRED));

  *result = response_info_->auth_challenge;
}

void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(credentials);
}

void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), OK));
}

void URLRequestHttpJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  DCHECK(transaction_.get());

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

void URLRequestHttpJob::ContinueDespiteLastError() {
  // If the transaction was destroyed, then the job was cancelled.
  if (!transaction_.get())
    return;

  DCHECK(!response_info_) << "should not have a response yet";

  ResetTimer();

  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  int rv = transaction_->RestartIgnoringLastError(start_callback_);
  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}

bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
  // Some servers send the body compressed, but specify the content length as
  // the uncompressed size. Although this violates the HTTP spec, we want to
  // support it (as IE and Firefox do), but *only* for an exact match.
  // See http://crbug.com/79694.
  if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
      rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
    if (request_ && request_->response_headers()) {
      int64 expected_length = request_->response_headers()->GetContentLength();
      VLOG(1) << __FUNCTION__ << "() "
              << "\"" << request_->url().spec() << "\""
              << " content-length = " << expected_length
              << " pre total = " << prefilter_bytes_read()
              << " post total = " << postfilter_bytes_read();
      if (postfilter_bytes_read() == expected_length) {
        // Clear the error.
        return true;
      }
    }
  }
  return false;
}

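// Reads response body data from the transaction into |buf|. Returns true on
// synchronous completion with |*bytes_read| set (0 means end of stream);
// returns false either with status IO_PENDING, in which case OnReadCompleted
// is invoked later, or after signaling failure via NotifyDone.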
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    *bytes_read = rv;
    if (!rv)
      DoneWithRequest(FINISHED);
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}

void URLRequestHttpJob::StopCaching() {
  if (transaction_.get())
    transaction_->StopCaching();
}

void URLRequestHttpJob::DoneReading() {
  if (transaction_.get())
    transaction_->DoneReading();
  DoneWithRequest(FINISHED);
}

HostPortPair URLRequestHttpJob::GetSocketAddress() const {
  return response_info_ ? response_info_->socket_address : HostPortPair();
}

URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
  DoneWithRequest(ABORTED);
}

void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  request_creation_time_ = base::Time();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);

  static const bool use_overlapped_read_histogram =
      base::FieldTrialList::TrialExists("OverlappedReadImpact");
  if (use_overlapped_read_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "OverlappedReadImpact"),
        to_start);
  }

  static const bool use_warm_socket_impact_histogram =
      base::FieldTrialList::TrialExists("WarmSocketImpact");
  if (use_warm_socket_impact_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "WarmSocketImpact"),
        to_start);
  }

  static const bool use_prefetch_histogram =
      base::FieldTrialList::TrialExists("Prefetch");
  if (use_prefetch_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "Prefetch"),
        to_start);
  }
  static const bool use_prerender_histogram =
      base::FieldTrialList::TrialExists("Prerender");
  if (use_prerender_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "Prerender"),
        to_start);
  }
}

void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}

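// Tracks packet arrival for the SDCH experiments. Only does work when packet
// timing was enabled while adding headers; records the time the most recent
// network bytes arrived and a snapshot of the request start time.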
void URLRequestHttpJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  final_packet_time_ = base::Time::Now();
  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();

  bytes_observed_in_packets_ = filter_input_byte_count();
}

void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
                                 duration,
                                 base::TimeDelta::FromMilliseconds(20),
                                 base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}

// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
    do { \
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
                                  500, 1000000, 100); \
    } while (0)

void URLRequestHttpJob::RecordCompressionHistograms() {
  DCHECK(request_);
  if (!request_)
    return;

  if (is_cached_content_ ||          // Don't record cached content
      !GetStatus().is_success() ||   // Don't record failed content
      !IsCompressibleContent() ||    // Only record compressible content
      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
    return;

  // Miniature requests aren't really compressible. Don't count them.
  const int kMinSize = 16;
  if (prefilter_bytes_read() < kMinSize)
    return;

  // Only record for http or https urls.
  bool is_http = request_->url().SchemeIs("http");
  bool is_https = request_->url().SchemeIs("https");
  if (!is_http && !is_https)
    return;

  int compressed_B = prefilter_bytes_read();
  int decompressed_B = postfilter_bytes_read();
  bool was_filtered = HasFilter();

  // We want to record how often downloaded resources are compressed.
  // But, we recognize that different protocols may have different
  // properties. So, for each request, we'll put it into one of 3
  // groups:
  //   a) SSL resources
  //      Proxies cannot tamper with compression headers with SSL.
  //   b) Non-SSL, loaded-via-proxy resources
  //      In this case, we know a proxy might have interfered.
  //   c) Non-SSL, loaded-without-proxy resources
  //      In this case, we know there was no explicit proxy. However,
  //      it is possible that a transparent proxy was still interfering.
  //
  // For each group, we record the same 3 histograms.

  if (is_https) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (request_->was_fetched_via_proxy()) {
    if (was_filtered) {
      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
    } else {
      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
    }
    return;
  }

  if (was_filtered) {
    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
  } else {
    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
  }
}

bool URLRequestHttpJob::IsCompressibleContent() const {
  std::string mime_type;
  return GetMimeType(&mime_type) &&
      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
       IsSupportedNonImageMimeType(mime_type.c_str()));
}

void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  static const bool use_overlapped_read_histogram =
      base::FieldTrialList::TrialExists("OverlappedReadImpact");
  if (use_overlapped_read_histogram) {
    UMA_HISTOGRAM_TIMES(
        base::FieldTrial::MakeName("Net.HttpJob.TotalTime",
                                   "OverlappedReadImpact"),
        total_time);

    if (reason == FINISHED) {
      UMA_HISTOGRAM_TIMES(
          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeSuccess",
                                     "OverlappedReadImpact"),
          total_time);
    } else {
      UMA_HISTOGRAM_TIMES(
          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCancel",
                                     "OverlappedReadImpact"),
          total_time);
    }

    if (response_info_) {
      if (response_info_->was_cached) {
        UMA_HISTOGRAM_TIMES(
            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCached",
                                       "OverlappedReadImpact"),
            total_time);
      } else {
        UMA_HISTOGRAM_TIMES(
            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeNotCached",
                                       "OverlappedReadImpact"),
            total_time);
      }
    }
  }

  static const bool cache_sensitivity_analysis =
      base::FieldTrialList::TrialExists("CacheSensitivityAnalysis");
  if (cache_sensitivity_analysis) {
    UMA_HISTOGRAM_TIMES(
        base::FieldTrial::MakeName("Net.HttpJob.TotalTime",
                                   "CacheSensitivityAnalysis"),
        total_time);

    if (reason == FINISHED) {
      UMA_HISTOGRAM_TIMES(
          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeSuccess",
                                     "CacheSensitivityAnalysis"),
          total_time);
    } else {
      UMA_HISTOGRAM_TIMES(
          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCancel",
                                     "CacheSensitivityAnalysis"),
          total_time);
    }

    if (response_info_) {
      if (response_info_->was_cached) {
        UMA_HISTOGRAM_TIMES(
            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCached",
                                       "CacheSensitivityAnalysis"),
            total_time);
      } else {
        UMA_HISTOGRAM_TIMES(
            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeNotCached",
                                       "CacheSensitivityAnalysis"),
            total_time);
      }
    }
  }

  start_time_ = base::TimeTicks();
}

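// Runs at most once per job (guarded by |done_|): flushes the performance
// histograms and, on successful completion, records the received content
// length and compression statistics.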
void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
  if (done_)
    return;
  done_ = true;
  RecordPerfHistograms(reason);
  if (reason == FINISHED) {
    request_->set_received_response_content_length(prefilter_bytes_read());
    RecordCompressionHistograms();
  }
}

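// Returns the headers the delegate substituted via OnHeadersReceived when
// present; otherwise the headers from the transaction's response info.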
HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
  DCHECK(transaction_.get());
  DCHECK(transaction_->GetResponseInfo());
  return override_response_headers_.get() ?
      override_response_headers_ :
      transaction_->GetResponseInfo()->headers;
}

void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}

void URLRequestHttpJob::OnDetachRequest() {
  http_transaction_delegate_->OnDetachRequest();
}

}  // namespace net