// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/safe_browsing/protocol_manager.h"

#include "base/environment.h"
#include "base/logging.h"
#include "base/memory/scoped_vector.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/timer/timer.h"
#include "chrome/browser/safe_browsing/protocol_parser.h"
#include "chrome/common/chrome_version_info.h"
#include "chrome/common/env_vars.h"
#include "google_apis/google_api_keys.h"
#include "net/base/escape.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_status.h"

#if defined(OS_ANDROID)
#include "net/base/network_change_notifier.h"
#endif
using base::Time;
using base::TimeDelta;

namespace {
// UpdateResult indicates what happened with the primary and/or backup update
// requests. The ordering of the values must stay the same for UMA consistency,
// and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
enum UpdateResult {
  UPDATE_RESULT_FAIL,
  UPDATE_RESULT_SUCCESS,
  UPDATE_RESULT_BACKUP_CONNECT_FAIL,
  UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
  UPDATE_RESULT_BACKUP_HTTP_FAIL,
  UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
  UPDATE_RESULT_BACKUP_NETWORK_FAIL,
  UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
  UPDATE_RESULT_MAX,
  UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
};
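// Added illustration (not in the original file): UpdateFinished() encodes
// backup results as UPDATE_RESULT_BACKUP_START + 2 * reason + success, so the
// FAIL/SUCCESS pairs above must stay adjacent and in BackupUpdateReason order.
// For example, a successful backup update for an HTTP error (reason 1) is
// UPDATE_RESULT_BACKUP_START + 2 * 1 + 1 == UPDATE_RESULT_BACKUP_HTTP_SUCCESS.
static_assert(UPDATE_RESULT_BACKUP_HTTP_SUCCESS ==
                  UPDATE_RESULT_BACKUP_START + 2 * 1 + 1,
              "backup update results must follow the reason*2 layout");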
void RecordUpdateResult(UpdateResult result) {
  DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
  UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
}

}  // namespace
// Minimum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMin = 60;

// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMax = 300;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 30;

// Maximum back off multiplier.
static const size_t kSbMaxBackOff = 8;
// The default SBProtocolManagerFactory.
class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory {
 public:
  SBProtocolManagerFactoryImpl() {}
  ~SBProtocolManagerFactoryImpl() override {}
  SafeBrowsingProtocolManager* CreateProtocolManager(
      SafeBrowsingProtocolManagerDelegate* delegate,
      net::URLRequestContextGetter* request_context_getter,
      const SafeBrowsingProtocolConfig& config) override {
    return new SafeBrowsingProtocolManager(
        delegate, request_context_getter, config);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl);
};
// SafeBrowsingProtocolManager implementation ----------------------------------

// static
SBProtocolManagerFactory* SafeBrowsingProtocolManager::factory_ = NULL;
// static
SafeBrowsingProtocolManager* SafeBrowsingProtocolManager::Create(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config) {
  if (!factory_)
    factory_ = new SBProtocolManagerFactoryImpl();
  return factory_->CreateProtocolManager(
      delegate, request_context_getter, config);
}
SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config)
    : delegate_(delegate),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      next_update_interval_(base::TimeDelta::FromSeconds(
          base::RandInt(kSbTimerStartIntervalSecMin,
                        kSbTimerStartIntervalSecMax))),
      update_state_(FIRST_REQUEST),
      chunk_pending_to_write_(false),
      version_(config.version),
      update_size_(0),
      client_name_(config.client_name),
      request_context_getter_(request_context_getter),
      url_fetcher_id_(0),
      url_prefix_(config.url_prefix),
      backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
      disable_auto_update_(config.disable_auto_update),
#if defined(OS_ANDROID)
      disable_connection_check_(config.disable_connection_check),
#endif
      app_in_foreground_(true) {
  DCHECK(!url_prefix_.empty());

  backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
      config.backup_connect_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
      config.backup_http_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
      config.backup_network_error_url_prefix;

  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());
  if (version_.empty())
    version_ = SafeBrowsingProtocolManagerHelper::Version();
}
// static
void SafeBrowsingProtocolManager::RecordGetHashResult(
    bool is_download, ResultType result_type) {
  if (is_download) {
    UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type,
                              GET_HASH_RESULT_MAX);
  } else {
    UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type,
                              GET_HASH_RESULT_MAX);
  }
}
bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
  return update_timer_.IsRunning();
}
SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
  // Delete in-progress SafeBrowsing requests.
  STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
                                      hash_requests_.end());
  hash_requests_.clear();
}
// We can only have one update or chunk request outstanding, but there may be
// multiple GetHash requests pending since we don't want to serialize them and
// slow down the user.
void SafeBrowsingProtocolManager::GetFullHash(
    const std::vector<SBPrefix>& prefixes,
    FullHashCallback callback,
    bool is_download) {
  DCHECK(CalledOnValidThread());
  // If we are in GetHash backoff, we need to check if we're past the next
  // allowed time. If we are, we can proceed with the request. If not, we are
  // required to return empty results (i.e. treat the page as safe).
  if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
    RecordGetHashResult(is_download, GET_HASH_BACKOFF_ERROR);
    std::vector<SBFullHashResult> full_hashes;
    callback.Run(full_hashes, base::TimeDelta());
    return;
  }
  GURL gethash_url = GetHashUrl();
  net::URLFetcher* fetcher = net::URLFetcher::Create(
      url_fetcher_id_++, gethash_url, net::URLFetcher::POST, this);
  hash_requests_[fetcher] = FullHashDetails(callback, is_download);

  const std::string get_hash = safe_browsing::FormatGetHash(prefixes);

  fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  fetcher->SetRequestContext(request_context_getter_.get());
  fetcher->SetUploadData("text/plain", get_hash);
  fetcher->Start();
}
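// Illustrative call site (a sketch, not from this file): the database layer
// passes its prefix hits plus a callback, e.g.
//   manager->GetFullHash(prefixes,
//                        base::Bind(&Client::OnFullHashResults, client),
//                        false /* is_download */);
// Several such requests may be in flight at once; only the update/chunk
// fetches are serialized through |request_|.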
void SafeBrowsingProtocolManager::GetNextUpdate() {
  DCHECK(CalledOnValidThread());
  if (request_.get() || request_type_ != NO_REQUEST)
    return;

#if defined(OS_ANDROID)
  if (!disable_connection_check_) {
    net::NetworkChangeNotifier::ConnectionType type =
        net::NetworkChangeNotifier::GetConnectionType();
    if (type != net::NetworkChangeNotifier::CONNECTION_WIFI) {
      ScheduleNextUpdate(false /* no back off */);
      return;
    }
  }
#endif

  IssueUpdateRequest();
}
// net::URLFetcherDelegate implementation ----------------------------------

// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that affects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const net::URLFetcher* source) {
  DCHECK(CalledOnValidThread());
  scoped_ptr<const net::URLFetcher> fetcher;

  HashRequests::iterator it = hash_requests_.find(source);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    const FullHashDetails& details = it->second;
    std::vector<SBFullHashResult> full_hashes;
    base::TimeDelta cache_lifetime;
    if (source->GetStatus().is_success() &&
        (source->GetResponseCode() == 200 ||
         source->GetResponseCode() == 204)) {
      // For tracking our GetHash false positive (204) rate, compared to real
      // (200) responses.
      if (source->GetResponseCode() == 200)
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_200);
      else
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_204);

      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      std::string data;
      source->GetResponseAsString(&data);
      if (!safe_browsing::ParseGetHash(
              data.data(), data.length(), &cache_lifetime, &full_hashes)) {
        full_hashes.clear();
        RecordGetHashResult(details.is_download, GET_HASH_PARSE_ERROR);
        // TODO(cbentzel): Should cache_lifetime be set to 0 here? (See
        // http://crbug.com/360232.)
      }
    } else {
      HandleGetHashError(Time::Now());
      if (source->GetStatus().status() == net::URLRequestStatus::FAILED) {
        RecordGetHashResult(details.is_download, GET_HASH_NETWORK_ERROR);
        DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                 << " failed with error: " << source->GetStatus().error();
      } else {
        RecordGetHashResult(details.is_download, GET_HASH_HTTP_ERROR);
        DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                 << " failed with error: " << source->GetResponseCode();
      }
    }

    // Invoke the callback with full_hashes, even if there was a parse error or
    // an error response code (in which case full_hashes will be empty). The
    // caller can't be blocked indefinitely.
    details.callback.Run(full_hashes, cache_lifetime);

    hash_requests_.erase(it);
  } else {
    // Update or chunk response.
    fetcher.reset(request_.release());
    if (request_type_ == UPDATE_REQUEST ||
        request_type_ == BACKUP_UPDATE_REQUEST) {
      if (!fetcher.get()) {
        // We've timed out waiting for an update response, so we've cancelled
        // the update request and scheduled a new one. Ignore this response.
        return;
      }

      // Cancel the update response timeout now that we have the response.
      timeout_timer_.Stop();
    }
    net::URLRequestStatus status = source->GetStatus();
    if (status.is_success() && source->GetResponseCode() == 200) {
      // We have data from the SafeBrowsing service.
      std::string data;
      source->GetResponseAsString(&data);

      // TODO(shess): Cleanup the flow of this code so that |parsed_ok| can be
      // removed or omitted.
      const bool parsed_ok = HandleServiceResponse(
          source->GetURL(), data.data(), data.length());
      if (!parsed_ok) {
        DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                 << " failed to parse.";
        chunk_request_urls_.clear();
        if (request_type_ == UPDATE_REQUEST &&
            IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
          return;
        }
        UpdateFinished(false);
      }

      switch (request_type_) {
        case CHUNK_REQUEST:
          if (parsed_ok) {
            chunk_request_urls_.pop_front();
            if (chunk_request_urls_.empty() && !chunk_pending_to_write_)
              UpdateFinished(true);
          }
          break;
        case UPDATE_REQUEST:
        case BACKUP_UPDATE_REQUEST:
          if (chunk_request_urls_.empty() && parsed_ok) {
            // We are up to date since the servers gave us nothing new, so we
            // are done with this update cycle.
            UpdateFinished(true);
          }
          break;
        case NO_REQUEST:
          // This can happen if HandleServiceResponse fails above.
          break;
        default:
          NOTREACHED();
          break;
      }
    } else {
      if (status.status() == net::URLRequestStatus::FAILED) {
        DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                 << " failed with error: " << source->GetStatus().error();
      } else {
        DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                 << " failed with error: " << source->GetResponseCode();
      }
      if (request_type_ == CHUNK_REQUEST) {
        // The SafeBrowsing service error, or very bad response code: back off.
        chunk_request_urls_.clear();
      } else if (request_type_ == UPDATE_REQUEST) {
        BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
        if (status.is_success()) {
          backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
        } else {
          switch (status.error()) {
            case net::ERR_INTERNET_DISCONNECTED:
            case net::ERR_NETWORK_CHANGED:
              backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
              break;
            default:
              backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
              break;
          }
        }
        if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
            IssueBackupUpdateRequest(backup_update_reason)) {
          return;
        }
      }
      UpdateFinished(false);
    }
  }

  // Get the next chunk if available.
  IssueChunkRequest();
}
bool SafeBrowsingProtocolManager::HandleServiceResponse(
    const GURL& url, const char* data, size_t length) {
  DCHECK(CalledOnValidThread());

  switch (request_type_) {
    case UPDATE_REQUEST:
    case BACKUP_UPDATE_REQUEST: {
      size_t next_update_sec = 0;
      bool reset = false;
      scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
          new std::vector<SBChunkDelete>);
      std::vector<ChunkUrl> chunk_urls;
      if (!safe_browsing::ParseUpdate(data, length, &next_update_sec, &reset,
                                      chunk_deletes.get(), &chunk_urls)) {
        return false;
      }

      base::TimeDelta next_update_interval =
          base::TimeDelta::FromSeconds(next_update_sec);
      last_update_ = Time::Now();

      if (update_state_ == FIRST_REQUEST)
        update_state_ = SECOND_REQUEST;
      else if (update_state_ == SECOND_REQUEST)
        update_state_ = NORMAL_REQUEST;

      // New time for the next update.
      if (next_update_interval > base::TimeDelta()) {
        next_update_interval_ = next_update_interval;
      } else if (update_state_ == SECOND_REQUEST) {
        next_update_interval_ = base::TimeDelta::FromSeconds(
            base::RandInt(15, 45));
      }

      // New chunks to download.
      if (!chunk_urls.empty()) {
        UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
        for (size_t i = 0; i < chunk_urls.size(); ++i)
          chunk_request_urls_.push_back(chunk_urls[i]);
      }

      // Handle the case where the SafeBrowsing service tells us to dump our
      // database.
      if (reset) {
        delegate_->ResetDatabase();
        return true;
      }

      // Chunks to delete from our storage.
      if (!chunk_deletes->empty())
        delegate_->DeleteChunks(chunk_deletes.Pass());

      break;
    }
    case CHUNK_REQUEST: {
      UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
                          base::Time::Now() - chunk_request_start_);

      const ChunkUrl chunk_url = chunk_request_urls_.front();
      scoped_ptr<ScopedVector<SBChunkData> >
          chunks(new ScopedVector<SBChunkData>);
      UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
      update_size_ += length;
      if (!safe_browsing::ParseChunk(data, length, chunks.get()))
        return false;

      // Chunks to add to storage. Pass ownership of |chunks|.
      if (!chunks->empty()) {
        chunk_pending_to_write_ = true;
        delegate_->AddChunks(
            chunk_url.list_name, chunks.Pass(),
            base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete,
                       base::Unretained(this)));
      }
      break;
    }

    default:
      return false;
  }

  return true;
}
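// Illustrative update response consumed above (a sketch of the v2.x wire
// format; the authoritative grammar lives in protocol_parser.cc):
//   n:1800
//   i:goog-malware-shavar
//   u:cache.example.com/chunk_1
//   ad:1-3,5
//   sd:4
// "n:" feeds |next_update_sec|, "u:" lines become |chunk_urls|, and the
// "ad:"/"sd:" entries become |chunk_deletes|.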
void SafeBrowsingProtocolManager::Initialize() {
  DCHECK(CalledOnValidThread());
  // Don't want to hit the safe browsing servers on build/chrome bots.
  scoped_ptr<base::Environment> env(base::Environment::Create());
  if (env->HasVar(env_vars::kHeadless))
    return;
  ScheduleNextUpdate(false /* no back off */);
}
void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
  DCHECK(CalledOnValidThread());
  if (disable_auto_update_) {
    // Unschedule any current timer.
    update_timer_.Stop();
    return;
  }
  // Reschedule with the new update.
  base::TimeDelta next_update_interval = GetNextUpdateInterval(back_off);
  ForceScheduleNextUpdate(next_update_interval);
}
void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
    base::TimeDelta interval) {
  DCHECK(CalledOnValidThread());
  DCHECK(interval >= base::TimeDelta());
  // Unschedule any current timer.
  update_timer_.Stop();
  update_timer_.Start(FROM_HERE, interval, this,
                      &SafeBrowsingProtocolManager::GetNextUpdate);
}
// According to section 5 of the SafeBrowsing protocol specification, we must
// back off after a certain number of errors. We only change
// |next_update_interval_| when we receive a response from the SafeBrowsing
// service.
base::TimeDelta SafeBrowsingProtocolManager::GetNextUpdateInterval(
    bool back_off) {
  DCHECK(CalledOnValidThread());
  DCHECK(next_update_interval_ > base::TimeDelta());
  base::TimeDelta next = next_update_interval_;
  if (back_off) {
    next = GetNextBackOffInterval(&update_error_count_,
                                  &update_back_off_mult_);
  } else {
    // Successful response means error reset.
    update_error_count_ = 0;
    update_back_off_mult_ = 1;
  }
  return next;
}
base::TimeDelta SafeBrowsingProtocolManager::GetNextBackOffInterval(
    size_t* error_count, size_t* multiplier) const {
  DCHECK(CalledOnValidThread());
  DCHECK(multiplier && error_count);
  (*error_count)++;
  if (*error_count > 1 && *error_count < 6) {
    base::TimeDelta next = base::TimeDelta::FromMinutes(
        *multiplier * (1 + back_off_fuzz_) * 30);
    *multiplier *= 2;
    if (*multiplier > kSbMaxBackOff)
      *multiplier = kSbMaxBackOff;
    return next;
  }
  if (*error_count >= 6)
    return base::TimeDelta::FromHours(8);
  return base::TimeDelta::FromMinutes(1);
}
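// Worked schedule for the back-off above (illustration added here;
// |back_off_fuzz_| is a random float in [0, 1), and the multiplier doubles up
// to kSbMaxBackOff):
//   error 1:  1 minute
//   error 2:  1 * (1 + fuzz) * 30 minutes  ->  30-60 minutes
//   error 3:  2 * (1 + fuzz) * 30 minutes  ->  1-2 hours
//   error 4:  4 * (1 + fuzz) * 30 minutes  ->  2-4 hours
//   error 5:  8 * (1 + fuzz) * 30 minutes  ->  4-8 hours
//   error 6+: 8 hours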
// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              other hand, this request will only occur every ~20-30 minutes so
//              there isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  DCHECK(CalledOnValidThread());
  request_type_ = UPDATE_REQUEST;
  delegate_->UpdateStarted();
  delegate_->GetChunks(
      base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete,
                 base::Unretained(this)));
}
// The backup request can run immediately since the chunks have already been
// retrieved from the DB.
bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
    BackupUpdateReason backup_update_reason) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(backup_update_reason >= 0 &&
         backup_update_reason < BACKUP_UPDATE_REASON_MAX);
  if (backup_url_prefixes_[backup_update_reason].empty())
    return false;
  request_type_ = BACKUP_UPDATE_REQUEST;
  backup_update_reason_ = backup_update_reason;

  GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, backup_update_url, net::URLFetcher::POST, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);

  return true;
}
void SafeBrowsingProtocolManager::IssueChunkRequest() {
  DCHECK(CalledOnValidThread());
  // We are only allowed to have one request outstanding at any time. Also,
  // don't get the next url until the previous one has been written to disk so
  // that we don't use too much memory.
  if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
    return;

  ChunkUrl next_chunk = chunk_request_urls_.front();
  DCHECK(!next_chunk.url.empty());
  GURL chunk_url = NextChunkUrl(next_chunk.url);
  request_type_ = CHUNK_REQUEST;
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, chunk_url, net::URLFetcher::GET, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  chunk_request_start_ = base::Time::Now();
  request_->Start();
}
void SafeBrowsingProtocolManager::OnGetChunksComplete(
    const std::vector<SBListChunkRanges>& lists, bool database_error) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(update_list_data_.empty());
  if (database_error) {
    // The update was not successful, but don't back off.
    UpdateFinished(false, false);
    return;
  }

  // Format our stored chunks:
  bool found_malware = false;
  bool found_phishing = false;
  for (size_t i = 0; i < lists.size(); ++i) {
    update_list_data_.append(safe_browsing::FormatList(lists[i]));
    if (lists[i].name == safe_browsing_util::kPhishingList)
      found_phishing = true;

    if (lists[i].name == safe_browsing_util::kMalwareList)
      found_malware = true;
  }

  // If we have an empty database, let the server know we want data for these
  // lists.
  // TODO(shess): These cases never happen because the database fills in the
  // lists in GetChunks(). Refactor the unit tests so that this code can be
  // removed.
  if (!found_phishing) {
    update_list_data_.append(safe_browsing::FormatList(
        SBListChunkRanges(safe_browsing_util::kPhishingList)));
  }
  if (!found_malware) {
    update_list_data_.append(safe_browsing::FormatList(
        SBListChunkRanges(safe_browsing_util::kMalwareList)));
  }

  // Large requests are (probably) a sign of database corruption.
  // Record stats to inform decisions about whether to automate
  // deletion of such databases. http://crbug.com/120219
  UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_.size());

  GURL update_url = UpdateUrl();
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, update_url, net::URLFetcher::POST, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);
}
// If we haven't heard back from the server with an update response, this method
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
  DCHECK(CalledOnValidThread());
  DCHECK(request_type_ == UPDATE_REQUEST ||
         request_type_ == BACKUP_UPDATE_REQUEST);
  request_.reset();
  if (request_type_ == UPDATE_REQUEST &&
      IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
    return;
  }
  UpdateFinished(false);
}
void SafeBrowsingProtocolManager::OnAddChunksComplete() {
  DCHECK(CalledOnValidThread());
  chunk_pending_to_write_ = false;

  if (chunk_request_urls_.empty()) {
    UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
    UpdateFinished(true);
  } else {
    IssueChunkRequest();
  }
}
void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
  DCHECK(CalledOnValidThread());
  base::TimeDelta next = GetNextBackOffInterval(
      &gethash_error_count_, &gethash_back_off_mult_);
  next_gethash_time_ = now + next;
}
void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
  UpdateFinished(success, !success);
}
void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
  DCHECK(CalledOnValidThread());
#if defined(OS_ANDROID)
  if (app_in_foreground_)
    UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeForeground", update_size_);
  else
    UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeBackground", update_size_);
#endif
  UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
  update_size_ = 0;
  bool update_success = success || request_type_ == CHUNK_REQUEST;
  if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
    RecordUpdateResult(
        update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
  } else {
    UpdateResult update_result = static_cast<UpdateResult>(
        UPDATE_RESULT_BACKUP_START +
        (static_cast<int>(backup_update_reason_) * 2) +
        update_success);
    RecordUpdateResult(update_result);
  }
  backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
  request_type_ = NO_REQUEST;
  update_list_data_.clear();
  delegate_->UpdateFinished(success);
  ScheduleNextUpdate(back_off);
}
GURL SafeBrowsingProtocolManager::UpdateUrl() const {
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      url_prefix_, "downloads", client_name_, version_, additional_query_);
  return GURL(url);
}
GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
    BackupUpdateReason backup_update_reason) const {
  DCHECK(backup_update_reason >= 0 &&
         backup_update_reason < BACKUP_UPDATE_REASON_MAX);
  DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
      version_, additional_query_);
  return GURL(url);
}
GURL SafeBrowsingProtocolManager::GetHashUrl() const {
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      url_prefix_, "gethash", client_name_, version_, additional_query_);
  return GURL(url);
}
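// Illustrative URL produced by ComposeUrl() above (a sketch; the exact query
// parameters are defined by SafeBrowsingProtocolManagerHelper):
//   <url_prefix_>/gethash?client=<client_name_>&appver=<version_>&pver=...
// with |additional_query_| appended when present.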
GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
  DCHECK(CalledOnValidThread());
  std::string next_url;
  if (!StartsWithASCII(url, "http://", false) &&
      !StartsWithASCII(url, "https://", false)) {
    // Use https if we updated via https, otherwise http (useful for testing).
    if (StartsWithASCII(url_prefix_, "https://", false))
      next_url.append("https://");
    else
      next_url.append("http://");
    next_url.append(url);
  } else {
    next_url = url;
  }
  if (!additional_query_.empty()) {
    if (next_url.find("?") != std::string::npos) {
      next_url.append("&");
    } else {
      next_url.append("?");
    }
    next_url.append(additional_query_);
  }
  return GURL(next_url);
}
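// Illustrative behavior of NextChunkUrl() (assuming an https:// |url_prefix_|
// and an empty |additional_query_|; hostnames are placeholders):
//   "cache.example.com/chunk_1"        -> "https://cache.example.com/chunk_1"
//   "http://cache.example.com/chunk_2" -> "http://cache.example.com/chunk_2"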
SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
    : callback(), is_download(false) {}
SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
    FullHashCallback callback, bool is_download)
    : callback(callback),
      is_download(is_download) {
}
SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
}
SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {
}