1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "chrome/browser/safe_browsing/protocol_manager.h"
7 #include "base/environment.h"
8 #include "base/logging.h"
9 #include "base/memory/scoped_vector.h"
10 #include "base/metrics/histogram.h"
11 #include "base/profiler/scoped_profile.h"
12 #include "base/rand_util.h"
13 #include "base/stl_util.h"
14 #include "base/strings/string_util.h"
15 #include "base/strings/stringprintf.h"
16 #include "base/timer/timer.h"
17 #include "chrome/browser/safe_browsing/protocol_parser.h"
18 #include "chrome/common/chrome_version_info.h"
19 #include "chrome/common/env_vars.h"
20 #include "google_apis/google_api_keys.h"
21 #include "net/base/escape.h"
22 #include "net/base/load_flags.h"
23 #include "net/base/net_errors.h"
24 #include "net/url_request/url_fetcher.h"
25 #include "net/url_request/url_request_context_getter.h"
26 #include "net/url_request/url_request_status.h"
28 #if defined(OS_ANDROID)
29 #include "net/base/network_change_notifier.h"
33 using base::TimeDelta
;
37 // UpdateResult indicates what happened with the primary and/or backup update
38 // requests. The ordering of the values must stay the same for UMA consistency,
39 // and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
42 UPDATE_RESULT_SUCCESS
,
43 UPDATE_RESULT_BACKUP_CONNECT_FAIL
,
44 UPDATE_RESULT_BACKUP_CONNECT_SUCCESS
,
45 UPDATE_RESULT_BACKUP_HTTP_FAIL
,
46 UPDATE_RESULT_BACKUP_HTTP_SUCCESS
,
47 UPDATE_RESULT_BACKUP_NETWORK_FAIL
,
48 UPDATE_RESULT_BACKUP_NETWORK_SUCCESS
,
50 UPDATE_RESULT_BACKUP_START
= UPDATE_RESULT_BACKUP_CONNECT_FAIL
,
53 void RecordUpdateResult(UpdateResult result
) {
54 DCHECK(result
>= 0 && result
< UPDATE_RESULT_MAX
);
55 UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result
, UPDATE_RESULT_MAX
);
// Minimum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMin = 60;

// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMax = 300;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 30;

// Maximum back off multiplier.
static const size_t kSbMaxBackOff = 8;
72 // The default SBProtocolManagerFactory.
73 class SBProtocolManagerFactoryImpl
: public SBProtocolManagerFactory
{
75 SBProtocolManagerFactoryImpl() { }
76 virtual ~SBProtocolManagerFactoryImpl() { }
77 virtual SafeBrowsingProtocolManager
* CreateProtocolManager(
78 SafeBrowsingProtocolManagerDelegate
* delegate
,
79 net::URLRequestContextGetter
* request_context_getter
,
80 const SafeBrowsingProtocolConfig
& config
) override
{
81 return new SafeBrowsingProtocolManager(
82 delegate
, request_context_getter
, config
);
85 DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl
);
88 // SafeBrowsingProtocolManager implementation ----------------------------------
91 SBProtocolManagerFactory
* SafeBrowsingProtocolManager::factory_
= NULL
;
94 SafeBrowsingProtocolManager
* SafeBrowsingProtocolManager::Create(
95 SafeBrowsingProtocolManagerDelegate
* delegate
,
96 net::URLRequestContextGetter
* request_context_getter
,
97 const SafeBrowsingProtocolConfig
& config
) {
99 factory_
= new SBProtocolManagerFactoryImpl();
100 return factory_
->CreateProtocolManager(
101 delegate
, request_context_getter
, config
);
104 SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
105 SafeBrowsingProtocolManagerDelegate
* delegate
,
106 net::URLRequestContextGetter
* request_context_getter
,
107 const SafeBrowsingProtocolConfig
& config
)
108 : delegate_(delegate
),
109 request_type_(NO_REQUEST
),
110 update_error_count_(0),
111 gethash_error_count_(0),
112 update_back_off_mult_(1),
113 gethash_back_off_mult_(1),
114 next_update_interval_(base::TimeDelta::FromSeconds(
115 base::RandInt(kSbTimerStartIntervalSecMin
,
116 kSbTimerStartIntervalSecMax
))),
117 update_state_(FIRST_REQUEST
),
118 chunk_pending_to_write_(false),
119 version_(config
.version
),
121 client_name_(config
.client_name
),
122 request_context_getter_(request_context_getter
),
123 url_prefix_(config
.url_prefix
),
124 backup_update_reason_(BACKUP_UPDATE_REASON_MAX
),
125 disable_auto_update_(config
.disable_auto_update
),
126 #if defined(OS_ANDROID)
127 disable_connection_check_(config
.disable_connection_check
),
130 app_in_foreground_(true) {
131 DCHECK(!url_prefix_
.empty());
133 backup_url_prefixes_
[BACKUP_UPDATE_REASON_CONNECT
] =
134 config
.backup_connect_error_url_prefix
;
135 backup_url_prefixes_
[BACKUP_UPDATE_REASON_HTTP
] =
136 config
.backup_http_error_url_prefix
;
137 backup_url_prefixes_
[BACKUP_UPDATE_REASON_NETWORK
] =
138 config
.backup_network_error_url_prefix
;
140 // Set the backoff multiplier fuzz to a random value between 0 and 1.
141 back_off_fuzz_
= static_cast<float>(base::RandDouble());
142 if (version_
.empty())
143 version_
= SafeBrowsingProtocolManagerHelper::Version();
147 void SafeBrowsingProtocolManager::RecordGetHashResult(
148 bool is_download
, ResultType result_type
) {
150 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type
,
151 GET_HASH_RESULT_MAX
);
153 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type
,
154 GET_HASH_RESULT_MAX
);
158 bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
159 return update_timer_
.IsRunning();
162 SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
163 // Delete in-progress SafeBrowsing requests.
164 STLDeleteContainerPairFirstPointers(hash_requests_
.begin(),
165 hash_requests_
.end());
166 hash_requests_
.clear();
169 // We can only have one update or chunk request outstanding, but there may be
170 // multiple GetHash requests pending since we don't want to serialize them and
171 // slow down the user.
172 void SafeBrowsingProtocolManager::GetFullHash(
173 const std::vector
<SBPrefix
>& prefixes
,
174 FullHashCallback callback
,
176 DCHECK(CalledOnValidThread());
177 // If we are in GetHash backoff, we need to check if we're past the next
178 // allowed time. If we are, we can proceed with the request. If not, we are
179 // required to return empty results (i.e. treat the page as safe).
180 if (gethash_error_count_
&& Time::Now() <= next_gethash_time_
) {
181 RecordGetHashResult(is_download
, GET_HASH_BACKOFF_ERROR
);
182 std::vector
<SBFullHashResult
> full_hashes
;
183 callback
.Run(full_hashes
, base::TimeDelta());
186 GURL gethash_url
= GetHashUrl();
187 net::URLFetcher
* fetcher
= net::URLFetcher::Create(
188 url_fetcher_id_
++, gethash_url
, net::URLFetcher::POST
, this);
189 hash_requests_
[fetcher
] = FullHashDetails(callback
, is_download
);
191 const std::string get_hash
= safe_browsing::FormatGetHash(prefixes
);
193 fetcher
->SetLoadFlags(net::LOAD_DISABLE_CACHE
);
194 fetcher
->SetRequestContext(request_context_getter_
.get());
195 fetcher
->SetUploadData("text/plain", get_hash
);
199 void SafeBrowsingProtocolManager::GetNextUpdate() {
200 DCHECK(CalledOnValidThread());
201 if (request_
.get() || request_type_
!= NO_REQUEST
)
204 #if defined(OS_ANDROID)
205 if (!disable_connection_check_
) {
206 net::NetworkChangeNotifier::ConnectionType type
=
207 net::NetworkChangeNotifier::GetConnectionType();
208 if (type
!= net::NetworkChangeNotifier::CONNECTION_WIFI
) {
209 ScheduleNextUpdate(false /* no back off */);
215 IssueUpdateRequest();
218 // net::URLFetcherDelegate implementation ----------------------------------
220 // All SafeBrowsing request responses are handled here.
221 // TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
222 // chunk should retry the download and parse of that chunk (and
223 // what back off / how many times to try), and if that effects the
224 // update back off. For now, a failed parse of the chunk means we
225 // drop it. This isn't so bad because the next UPDATE_REQUEST we
226 // do will report all the chunks we have. If that chunk is still
227 // required, the SafeBrowsing servers will tell us to get it again.
228 void SafeBrowsingProtocolManager::OnURLFetchComplete(
229 const net::URLFetcher
* source
) {
230 // TODO(vadimt): Remove ScopedProfile below once crbug.com/422577 is fixed.
231 tracked_objects::ScopedProfile
tracking_profile(
232 FROM_HERE_WITH_EXPLICIT_FUNCTION(
233 "422577 SafeBrowsingProtocolManager::OnURLFetchComplete"));
235 DCHECK(CalledOnValidThread());
236 scoped_ptr
<const net::URLFetcher
> fetcher
;
238 HashRequests::iterator it
= hash_requests_
.find(source
);
239 if (it
!= hash_requests_
.end()) {
241 fetcher
.reset(it
->first
);
242 const FullHashDetails
& details
= it
->second
;
243 std::vector
<SBFullHashResult
> full_hashes
;
244 base::TimeDelta cache_lifetime
;
245 if (source
->GetStatus().is_success() &&
246 (source
->GetResponseCode() == 200 ||
247 source
->GetResponseCode() == 204)) {
248 // For tracking our GetHash false positive (204) rate, compared to real
250 if (source
->GetResponseCode() == 200)
251 RecordGetHashResult(details
.is_download
, GET_HASH_STATUS_200
);
253 RecordGetHashResult(details
.is_download
, GET_HASH_STATUS_204
);
255 gethash_error_count_
= 0;
256 gethash_back_off_mult_
= 1;
258 source
->GetResponseAsString(&data
);
259 if (!safe_browsing::ParseGetHash(
260 data
.data(), data
.length(), &cache_lifetime
, &full_hashes
)) {
262 RecordGetHashResult(details
.is_download
, GET_HASH_PARSE_ERROR
);
263 // TODO(cbentzel): Should cache_lifetime be set to 0 here? (See
264 // http://crbug.com/360232.)
267 HandleGetHashError(Time::Now());
268 if (source
->GetStatus().status() == net::URLRequestStatus::FAILED
) {
269 RecordGetHashResult(details
.is_download
, GET_HASH_NETWORK_ERROR
);
270 VLOG(1) << "SafeBrowsing GetHash request for: " << source
->GetURL()
271 << " failed with error: " << source
->GetStatus().error();
273 RecordGetHashResult(details
.is_download
, GET_HASH_HTTP_ERROR
);
274 VLOG(1) << "SafeBrowsing GetHash request for: " << source
->GetURL()
275 << " failed with error: " << source
->GetResponseCode();
279 // Invoke the callback with full_hashes, even if there was a parse error or
280 // an error response code (in which case full_hashes will be empty). The
281 // caller can't be blocked indefinitely.
282 details
.callback
.Run(full_hashes
, cache_lifetime
);
284 hash_requests_
.erase(it
);
286 // Update or chunk response.
287 fetcher
.reset(request_
.release());
289 if (request_type_
== UPDATE_REQUEST
||
290 request_type_
== BACKUP_UPDATE_REQUEST
) {
291 if (!fetcher
.get()) {
292 // We've timed out waiting for an update response, so we've cancelled
293 // the update request and scheduled a new one. Ignore this response.
297 // Cancel the update response timeout now that we have the response.
298 timeout_timer_
.Stop();
301 net::URLRequestStatus status
= source
->GetStatus();
302 if (status
.is_success() && source
->GetResponseCode() == 200) {
303 // We have data from the SafeBrowsing service.
305 source
->GetResponseAsString(&data
);
307 // TODO(shess): Cleanup the flow of this code so that |parsed_ok| can be
308 // removed or omitted.
309 const bool parsed_ok
= HandleServiceResponse(
310 source
->GetURL(), data
.data(), data
.length());
312 VLOG(1) << "SafeBrowsing request for: " << source
->GetURL()
314 chunk_request_urls_
.clear();
315 if (request_type_
== UPDATE_REQUEST
&&
316 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP
)) {
319 UpdateFinished(false);
322 switch (request_type_
) {
325 chunk_request_urls_
.pop_front();
326 if (chunk_request_urls_
.empty() && !chunk_pending_to_write_
)
327 UpdateFinished(true);
331 case BACKUP_UPDATE_REQUEST
:
332 if (chunk_request_urls_
.empty() && parsed_ok
) {
333 // We are up to date since the servers gave us nothing new, so we
334 // are done with this update cycle.
335 UpdateFinished(true);
339 // This can happen if HandleServiceResponse fails above.
346 if (status
.status() == net::URLRequestStatus::FAILED
) {
347 VLOG(1) << "SafeBrowsing request for: " << source
->GetURL()
348 << " failed with error: " << source
->GetStatus().error();
350 VLOG(1) << "SafeBrowsing request for: " << source
->GetURL()
351 << " failed with error: " << source
->GetResponseCode();
353 if (request_type_
== CHUNK_REQUEST
) {
354 // The SafeBrowsing service error, or very bad response code: back off.
355 chunk_request_urls_
.clear();
356 } else if (request_type_
== UPDATE_REQUEST
) {
357 BackupUpdateReason backup_update_reason
= BACKUP_UPDATE_REASON_MAX
;
358 if (status
.is_success()) {
359 backup_update_reason
= BACKUP_UPDATE_REASON_HTTP
;
361 switch (status
.error()) {
362 case net::ERR_INTERNET_DISCONNECTED
:
363 case net::ERR_NETWORK_CHANGED
:
364 backup_update_reason
= BACKUP_UPDATE_REASON_NETWORK
;
367 backup_update_reason
= BACKUP_UPDATE_REASON_CONNECT
;
371 if (backup_update_reason
!= BACKUP_UPDATE_REASON_MAX
&&
372 IssueBackupUpdateRequest(backup_update_reason
)) {
376 UpdateFinished(false);
380 // Get the next chunk if available.
384 bool SafeBrowsingProtocolManager::HandleServiceResponse(
385 const GURL
& url
, const char* data
, size_t length
) {
386 DCHECK(CalledOnValidThread());
388 switch (request_type_
) {
390 case BACKUP_UPDATE_REQUEST
: {
391 size_t next_update_sec
= 0;
393 scoped_ptr
<std::vector
<SBChunkDelete
> > chunk_deletes(
394 new std::vector
<SBChunkDelete
>);
395 std::vector
<ChunkUrl
> chunk_urls
;
396 if (!safe_browsing::ParseUpdate(data
, length
, &next_update_sec
, &reset
,
397 chunk_deletes
.get(), &chunk_urls
)) {
401 base::TimeDelta next_update_interval
=
402 base::TimeDelta::FromSeconds(next_update_sec
);
403 last_update_
= Time::Now();
405 if (update_state_
== FIRST_REQUEST
)
406 update_state_
= SECOND_REQUEST
;
407 else if (update_state_
== SECOND_REQUEST
)
408 update_state_
= NORMAL_REQUEST
;
410 // New time for the next update.
411 if (next_update_interval
> base::TimeDelta()) {
412 next_update_interval_
= next_update_interval
;
413 } else if (update_state_
== SECOND_REQUEST
) {
414 next_update_interval_
= base::TimeDelta::FromSeconds(
415 base::RandInt(15, 45));
418 // New chunks to download.
419 if (!chunk_urls
.empty()) {
420 UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls
.size());
421 for (size_t i
= 0; i
< chunk_urls
.size(); ++i
)
422 chunk_request_urls_
.push_back(chunk_urls
[i
]);
425 // Handle the case were the SafeBrowsing service tells us to dump our
428 delegate_
->ResetDatabase();
432 // Chunks to delete from our storage.
433 if (!chunk_deletes
->empty())
434 delegate_
->DeleteChunks(chunk_deletes
.Pass());
438 case CHUNK_REQUEST
: {
439 UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
440 base::Time::Now() - chunk_request_start_
);
442 const ChunkUrl chunk_url
= chunk_request_urls_
.front();
443 scoped_ptr
<ScopedVector
<SBChunkData
> >
444 chunks(new ScopedVector
<SBChunkData
>);
445 UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length
);
446 update_size_
+= length
;
447 if (!safe_browsing::ParseChunk(data
, length
, chunks
.get()))
450 // Chunks to add to storage. Pass ownership of |chunks|.
451 if (!chunks
->empty()) {
452 chunk_pending_to_write_
= true;
453 delegate_
->AddChunks(
454 chunk_url
.list_name
, chunks
.Pass(),
455 base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete
,
456 base::Unretained(this)));
469 void SafeBrowsingProtocolManager::Initialize() {
470 DCHECK(CalledOnValidThread());
471 // Don't want to hit the safe browsing servers on build/chrome bots.
472 scoped_ptr
<base::Environment
> env(base::Environment::Create());
473 if (env
->HasVar(env_vars::kHeadless
))
475 ScheduleNextUpdate(false /* no back off */);
478 void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off
) {
479 DCHECK(CalledOnValidThread());
480 if (disable_auto_update_
) {
481 // Unschedule any current timer.
482 update_timer_
.Stop();
485 // Reschedule with the new update.
486 base::TimeDelta next_update_interval
= GetNextUpdateInterval(back_off
);
487 ForceScheduleNextUpdate(next_update_interval
);
490 void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
491 base::TimeDelta interval
) {
492 DCHECK(CalledOnValidThread());
493 DCHECK(interval
>= base::TimeDelta());
494 // Unschedule any current timer.
495 update_timer_
.Stop();
496 update_timer_
.Start(FROM_HERE
, interval
, this,
497 &SafeBrowsingProtocolManager::GetNextUpdate
);
500 // According to section 5 of the SafeBrowsing protocol specification, we must
501 // back off after a certain number of errors. We only change |next_update_sec_|
502 // when we receive a response from the SafeBrowsing service.
503 base::TimeDelta
SafeBrowsingProtocolManager::GetNextUpdateInterval(
505 DCHECK(CalledOnValidThread());
506 DCHECK(next_update_interval_
> base::TimeDelta());
507 base::TimeDelta next
= next_update_interval_
;
509 next
= GetNextBackOffInterval(&update_error_count_
, &update_back_off_mult_
);
511 // Successful response means error reset.
512 update_error_count_
= 0;
513 update_back_off_mult_
= 1;
518 base::TimeDelta
SafeBrowsingProtocolManager::GetNextBackOffInterval(
519 size_t* error_count
, size_t* multiplier
) const {
520 DCHECK(CalledOnValidThread());
521 DCHECK(multiplier
&& error_count
);
523 if (*error_count
> 1 && *error_count
< 6) {
524 base::TimeDelta next
= base::TimeDelta::FromMinutes(
525 *multiplier
* (1 + back_off_fuzz_
) * 30);
527 if (*multiplier
> kSbMaxBackOff
)
528 *multiplier
= kSbMaxBackOff
;
531 if (*error_count
>= 6)
532 return base::TimeDelta::FromHours(8);
533 return base::TimeDelta::FromMinutes(1);
536 // This request requires getting a list of all the chunks for each list from the
537 // database asynchronously. The request will be issued when we're called back in
538 // OnGetChunksComplete.
539 // TODO(paulg): We should get this at start up and maintain a ChunkRange cache
540 // to avoid hitting the database with each update request. On the
541 // otherhand, this request will only occur ~20-30 minutes so there
542 // isn't that much overhead. Measure!
543 void SafeBrowsingProtocolManager::IssueUpdateRequest() {
544 DCHECK(CalledOnValidThread());
545 request_type_
= UPDATE_REQUEST
;
546 delegate_
->UpdateStarted();
547 delegate_
->GetChunks(
548 base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete
,
549 base::Unretained(this)));
552 // The backup request can run immediately since the chunks have already been
553 // retrieved from the DB.
554 bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
555 BackupUpdateReason backup_update_reason
) {
556 DCHECK(CalledOnValidThread());
557 DCHECK_EQ(request_type_
, UPDATE_REQUEST
);
558 DCHECK(backup_update_reason
>= 0 &&
559 backup_update_reason
< BACKUP_UPDATE_REASON_MAX
);
560 if (backup_url_prefixes_
[backup_update_reason
].empty())
562 request_type_
= BACKUP_UPDATE_REQUEST
;
563 backup_update_reason_
= backup_update_reason
;
565 GURL backup_update_url
= BackupUpdateUrl(backup_update_reason
);
566 request_
.reset(net::URLFetcher::Create(
567 url_fetcher_id_
++, backup_update_url
, net::URLFetcher::POST
, this));
568 request_
->SetLoadFlags(net::LOAD_DISABLE_CACHE
);
569 request_
->SetRequestContext(request_context_getter_
.get());
570 request_
->SetUploadData("text/plain", update_list_data_
);
573 // Begin the update request timeout.
574 timeout_timer_
.Start(FROM_HERE
, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec
),
576 &SafeBrowsingProtocolManager::UpdateResponseTimeout
);
581 void SafeBrowsingProtocolManager::IssueChunkRequest() {
582 DCHECK(CalledOnValidThread());
583 // We are only allowed to have one request outstanding at any time. Also,
584 // don't get the next url until the previous one has been written to disk so
585 // that we don't use too much memory.
586 if (request_
.get() || chunk_request_urls_
.empty() || chunk_pending_to_write_
)
589 ChunkUrl next_chunk
= chunk_request_urls_
.front();
590 DCHECK(!next_chunk
.url
.empty());
591 GURL chunk_url
= NextChunkUrl(next_chunk
.url
);
592 request_type_
= CHUNK_REQUEST
;
593 request_
.reset(net::URLFetcher::Create(
594 url_fetcher_id_
++, chunk_url
, net::URLFetcher::GET
, this));
595 request_
->SetLoadFlags(net::LOAD_DISABLE_CACHE
);
596 request_
->SetRequestContext(request_context_getter_
.get());
597 chunk_request_start_
= base::Time::Now();
601 void SafeBrowsingProtocolManager::OnGetChunksComplete(
602 const std::vector
<SBListChunkRanges
>& lists
, bool database_error
) {
603 DCHECK(CalledOnValidThread());
604 DCHECK_EQ(request_type_
, UPDATE_REQUEST
);
605 DCHECK(update_list_data_
.empty());
606 if (database_error
) {
607 // The update was not successful, but don't back off.
608 UpdateFinished(false, false);
612 // Format our stored chunks:
613 bool found_malware
= false;
614 bool found_phishing
= false;
615 for (size_t i
= 0; i
< lists
.size(); ++i
) {
616 update_list_data_
.append(safe_browsing::FormatList(lists
[i
]));
617 if (lists
[i
].name
== safe_browsing_util::kPhishingList
)
618 found_phishing
= true;
620 if (lists
[i
].name
== safe_browsing_util::kMalwareList
)
621 found_malware
= true;
624 // If we have an empty database, let the server know we want data for these
626 // TODO(shess): These cases never happen because the database fills in the
627 // lists in GetChunks(). Refactor the unit tests so that this code can be
629 if (!found_phishing
) {
630 update_list_data_
.append(safe_browsing::FormatList(
631 SBListChunkRanges(safe_browsing_util::kPhishingList
)));
633 if (!found_malware
) {
634 update_list_data_
.append(safe_browsing::FormatList(
635 SBListChunkRanges(safe_browsing_util::kMalwareList
)));
638 // Large requests are (probably) a sign of database corruption.
639 // Record stats to inform decisions about whether to automate
640 // deletion of such databases. http://crbug.com/120219
641 UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_
.size());
643 GURL update_url
= UpdateUrl();
644 request_
.reset(net::URLFetcher::Create(
645 url_fetcher_id_
++, update_url
, net::URLFetcher::POST
, this));
646 request_
->SetLoadFlags(net::LOAD_DISABLE_CACHE
);
647 request_
->SetRequestContext(request_context_getter_
.get());
648 request_
->SetUploadData("text/plain", update_list_data_
);
651 // Begin the update request timeout.
652 timeout_timer_
.Start(FROM_HERE
, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec
),
654 &SafeBrowsingProtocolManager::UpdateResponseTimeout
);
657 // If we haven't heard back from the server with an update response, this method
658 // will run. Close the current update session and schedule another update.
659 void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
660 DCHECK(CalledOnValidThread());
661 DCHECK(request_type_
== UPDATE_REQUEST
||
662 request_type_
== BACKUP_UPDATE_REQUEST
);
664 if (request_type_
== UPDATE_REQUEST
&&
665 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT
)) {
668 UpdateFinished(false);
671 void SafeBrowsingProtocolManager::OnAddChunksComplete() {
672 DCHECK(CalledOnValidThread());
673 chunk_pending_to_write_
= false;
675 if (chunk_request_urls_
.empty()) {
676 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_
);
677 UpdateFinished(true);
683 void SafeBrowsingProtocolManager::HandleGetHashError(const Time
& now
) {
684 DCHECK(CalledOnValidThread());
685 base::TimeDelta next
= GetNextBackOffInterval(
686 &gethash_error_count_
, &gethash_back_off_mult_
);
687 next_gethash_time_
= now
+ next
;
690 void SafeBrowsingProtocolManager::UpdateFinished(bool success
) {
691 UpdateFinished(success
, !success
);
694 void SafeBrowsingProtocolManager::UpdateFinished(bool success
, bool back_off
) {
695 DCHECK(CalledOnValidThread());
696 #if defined(OS_ANDROID)
697 if (app_in_foreground_
)
698 UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeForeground", update_size_
);
700 UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeBackground", update_size_
);
702 UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_
);
704 bool update_success
= success
|| request_type_
== CHUNK_REQUEST
;
705 if (backup_update_reason_
== BACKUP_UPDATE_REASON_MAX
) {
707 update_success
? UPDATE_RESULT_SUCCESS
: UPDATE_RESULT_FAIL
);
709 UpdateResult update_result
= static_cast<UpdateResult
>(
710 UPDATE_RESULT_BACKUP_START
+
711 (static_cast<int>(backup_update_reason_
) * 2) +
713 RecordUpdateResult(update_result
);
715 backup_update_reason_
= BACKUP_UPDATE_REASON_MAX
;
716 request_type_
= NO_REQUEST
;
717 update_list_data_
.clear();
718 delegate_
->UpdateFinished(success
);
719 ScheduleNextUpdate(back_off
);
722 GURL
SafeBrowsingProtocolManager::UpdateUrl() const {
723 std::string url
= SafeBrowsingProtocolManagerHelper::ComposeUrl(
724 url_prefix_
, "downloads", client_name_
, version_
, additional_query_
);
728 GURL
SafeBrowsingProtocolManager::BackupUpdateUrl(
729 BackupUpdateReason backup_update_reason
) const {
730 DCHECK(backup_update_reason
>= 0 &&
731 backup_update_reason
< BACKUP_UPDATE_REASON_MAX
);
732 DCHECK(!backup_url_prefixes_
[backup_update_reason
].empty());
733 std::string url
= SafeBrowsingProtocolManagerHelper::ComposeUrl(
734 backup_url_prefixes_
[backup_update_reason
], "downloads", client_name_
,
735 version_
, additional_query_
);
739 GURL
SafeBrowsingProtocolManager::GetHashUrl() const {
740 std::string url
= SafeBrowsingProtocolManagerHelper::ComposeUrl(
741 url_prefix_
, "gethash", client_name_
, version_
, additional_query_
);
745 GURL
SafeBrowsingProtocolManager::NextChunkUrl(const std::string
& url
) const {
746 DCHECK(CalledOnValidThread());
747 std::string next_url
;
748 if (!StartsWithASCII(url
, "http://", false) &&
749 !StartsWithASCII(url
, "https://", false)) {
750 // Use https if we updated via https, otherwise http (useful for testing).
751 if (StartsWithASCII(url_prefix_
, "https://", false))
752 next_url
.append("https://");
754 next_url
.append("http://");
755 next_url
.append(url
);
759 if (!additional_query_
.empty()) {
760 if (next_url
.find("?") != std::string::npos
) {
761 next_url
.append("&");
763 next_url
.append("?");
765 next_url
.append(additional_query_
);
767 return GURL(next_url
);
770 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
775 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
776 FullHashCallback callback
, bool is_download
)
777 : callback(callback
),
778 is_download(is_download
) {
781 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
784 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {