// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/safe_browsing/protocol_manager.h"

#include "base/environment.h"
#include "base/logging.h"
#include "base/memory/scoped_vector.h"
#include "base/metrics/histogram.h"
#include "base/metrics/sparse_histogram.h"
#include "base/profiler/scoped_tracker.h"
#include "base/rand_util.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/timer/timer.h"
#include "chrome/browser/safe_browsing/protocol_parser.h"
#include "chrome/common/env_vars.h"
#include "google_apis/google_api_keys.h"
#include "net/base/escape.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_status_code.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_status.h"

#if defined(OS_ANDROID)
#include "net/base/network_change_notifier.h"
#endif

using base::Time;
using base::TimeDelta;

namespace {

// UpdateResult indicates what happened with the primary and/or backup update
// requests. The ordering of the values must stay the same for UMA consistency,
// and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
enum UpdateResult {
  UPDATE_RESULT_FAIL,
  UPDATE_RESULT_SUCCESS,
  UPDATE_RESULT_BACKUP_CONNECT_FAIL,
  UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
  UPDATE_RESULT_BACKUP_HTTP_FAIL,
  UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
  UPDATE_RESULT_BACKUP_NETWORK_FAIL,
  UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
  UPDATE_RESULT_MAX,
  UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
};
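
// Note on the layout above: each BackupUpdateReason contributes a
// (FAIL, SUCCESS) pair starting at UPDATE_RESULT_BACKUP_START, which is what
// lets UpdateFinished() below compute a histogram bucket arithmetically (see
// the worked example there).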

void RecordUpdateResult(UpdateResult result) {
  DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
  UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
}

}  // namespace

// Minimum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMin = 60;

// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMax = 300;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 30;

// Maximum back off multiplier.
static const size_t kSbMaxBackOff = 8;

const char kUmaHashResponseMetric[] = "SB2.GetHashResponseOrErrorCode";

// The default SBProtocolManagerFactory.
class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory {
 public:
  SBProtocolManagerFactoryImpl() {}
  ~SBProtocolManagerFactoryImpl() override {}
  SafeBrowsingProtocolManager* CreateProtocolManager(
      SafeBrowsingProtocolManagerDelegate* delegate,
      net::URLRequestContextGetter* request_context_getter,
      const SafeBrowsingProtocolConfig& config) override {
    return new SafeBrowsingProtocolManager(
        delegate, request_context_getter, config);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl);
};
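
// Note (an assumption, not stated in this file): the static |factory_| seam
// below exists so tests can install a custom SBProtocolManagerFactory before
// Create() is first called; otherwise the default SBProtocolManagerFactoryImpl
// above is lazily instantiated.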

// SafeBrowsingProtocolManager implementation ----------------------------------

// static
SBProtocolManagerFactory* SafeBrowsingProtocolManager::factory_ = NULL;

// static
SafeBrowsingProtocolManager* SafeBrowsingProtocolManager::Create(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config) {
  // TODO(cbentzel): Remove ScopedTracker below once crbug.com/483689 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "483689 SafeBrowsingProtocolManager::Create"));
  if (!factory_)
    factory_ = new SBProtocolManagerFactoryImpl();
  return factory_->CreateProtocolManager(
      delegate, request_context_getter, config);
}

SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config)
    : delegate_(delegate),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      next_update_interval_(base::TimeDelta::FromSeconds(
          base::RandInt(kSbTimerStartIntervalSecMin,
                        kSbTimerStartIntervalSecMax))),
      update_state_(FIRST_REQUEST),
      chunk_pending_to_write_(false),
      version_(config.version),
      update_size_(0),
      client_name_(config.client_name),
      request_context_getter_(request_context_getter),
      url_prefix_(config.url_prefix),
      backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
      disable_auto_update_(config.disable_auto_update),
#if defined(OS_ANDROID)
      disable_connection_check_(config.disable_connection_check),
#endif
      url_fetcher_id_(0),
      app_in_foreground_(true) {
  DCHECK(!url_prefix_.empty());

  backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
      config.backup_connect_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
      config.backup_http_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
      config.backup_network_error_url_prefix;

  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());
  if (version_.empty())
    version_ = SafeBrowsingProtocolManagerHelper::Version();
}

// static
void SafeBrowsingProtocolManager::RecordGetHashResult(
    bool is_download, ResultType result_type) {
  if (is_download) {
    UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type,
                              GET_HASH_RESULT_MAX);
  } else {
    UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type,
                              GET_HASH_RESULT_MAX);
  }
}

void SafeBrowsingProtocolManager::RecordGetHashResponseOrErrorCode(
    net::URLRequestStatus status, int response_code) {
  UMA_HISTOGRAM_SPARSE_SLOWLY(
      kUmaHashResponseMetric,
      status.is_success() ? response_code : status.error());
}

bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
  return update_timer_.IsRunning();
}

SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
  // Delete in-progress SafeBrowsing requests.
  STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
                                      hash_requests_.end());
  hash_requests_.clear();
}

// We can only have one update or chunk request outstanding, but there may be
// multiple GetHash requests pending since we don't want to serialize them and
// slow down the user.
void SafeBrowsingProtocolManager::GetFullHash(
    const std::vector<SBPrefix>& prefixes,
    FullHashCallback callback,
    bool is_download,
    bool is_extended_reporting) {
  DCHECK(CalledOnValidThread());
  // If we are in GetHash backoff, we need to check if we're past the next
  // allowed time. If we are, we can proceed with the request. If not, we are
  // required to return empty results (i.e. treat the page as safe).
  if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
    RecordGetHashResult(is_download, GET_HASH_BACKOFF_ERROR);
    std::vector<SBFullHashResult> full_hashes;
    callback.Run(full_hashes, base::TimeDelta());
    return;
  }
  GURL gethash_url = GetHashUrl(is_extended_reporting);
  net::URLFetcher* fetcher =
      net::URLFetcher::Create(url_fetcher_id_++, gethash_url,
                              net::URLFetcher::POST, this).release();
  hash_requests_[fetcher] = FullHashDetails(callback, is_download);

  const std::string get_hash = safe_browsing::FormatGetHash(prefixes);

  fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  fetcher->SetRequestContext(request_context_getter_.get());
  fetcher->SetUploadData("text/plain", get_hash);
  fetcher->Start();
}

void SafeBrowsingProtocolManager::GetNextUpdate() {
  DCHECK(CalledOnValidThread());
  if (request_.get() || request_type_ != NO_REQUEST)
    return;

#if defined(OS_ANDROID)
  if (!disable_connection_check_) {
    net::NetworkChangeNotifier::ConnectionType type =
        net::NetworkChangeNotifier::GetConnectionType();
    if (type != net::NetworkChangeNotifier::CONNECTION_WIFI) {
      ScheduleNextUpdate(false /* no back off */);
      return;
    }
  }
#endif

  IssueUpdateRequest();
}

// net::URLFetcherDelegate implementation ----------------------------------

// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that affects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const net::URLFetcher* source) {
  DCHECK(CalledOnValidThread());
  scoped_ptr<const net::URLFetcher> fetcher;

  HashRequests::iterator it = hash_requests_.find(source);
  int response_code = source->GetResponseCode();
  net::URLRequestStatus status = source->GetStatus();
  RecordGetHashResponseOrErrorCode(status, response_code);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    const FullHashDetails& details = it->second;
    std::vector<SBFullHashResult> full_hashes;
    base::TimeDelta cache_lifetime;
    if (status.is_success() &&
        (response_code == net::HTTP_OK ||
         response_code == net::HTTP_NO_CONTENT)) {
      // For tracking our GetHash false positive (net::HTTP_NO_CONTENT) rate,
      // compared to real (net::HTTP_OK) responses.
      if (response_code == net::HTTP_OK)
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_200);
      else
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_204);

      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      std::string data;
      source->GetResponseAsString(&data);
      if (!safe_browsing::ParseGetHash(
              data.data(), data.length(), &cache_lifetime, &full_hashes)) {
        full_hashes.clear();
        RecordGetHashResult(details.is_download, GET_HASH_PARSE_ERROR);
        // TODO(cbentzel): Should cache_lifetime be set to 0 here? (See
        // http://crbug.com/360232.)
      }
    } else {
      HandleGetHashError(Time::Now());
      if (status.status() == net::URLRequestStatus::FAILED) {
        RecordGetHashResult(details.is_download, GET_HASH_NETWORK_ERROR);
        DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                 << " failed with error: " << status.error();
      } else {
        RecordGetHashResult(details.is_download, GET_HASH_HTTP_ERROR);
        DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                 << " failed with error: " << response_code;
      }
    }

    // Invoke the callback with full_hashes, even if there was a parse error or
    // an error response code (in which case full_hashes will be empty). The
    // caller can't be blocked indefinitely.
    details.callback.Run(full_hashes, cache_lifetime);

    hash_requests_.erase(it);
  } else {
    // Update or chunk response.
    fetcher.reset(request_.release());

    if (request_type_ == UPDATE_REQUEST ||
        request_type_ == BACKUP_UPDATE_REQUEST) {
      if (!fetcher.get()) {
        // We've timed out waiting for an update response, so we've cancelled
        // the update request and scheduled a new one. Ignore this response.
        return;
      }

      // Cancel the update response timeout now that we have the response.
      timeout_timer_.Stop();
    }

    if (status.is_success() && response_code == net::HTTP_OK) {
      // We have data from the SafeBrowsing service.
      std::string data;
      source->GetResponseAsString(&data);

      // TODO(shess): Cleanup the flow of this code so that |parsed_ok| can be
      // removed or omitted.
      const bool parsed_ok = HandleServiceResponse(
          source->GetURL(), data.data(), data.length());
      if (!parsed_ok) {
        DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                 << " failed parse.";
        chunk_request_urls_.clear();
        if (request_type_ == UPDATE_REQUEST &&
            IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
          return;
        }
        UpdateFinished(false);
      }

      switch (request_type_) {
        case CHUNK_REQUEST:
          if (parsed_ok) {
            chunk_request_urls_.pop_front();
            if (chunk_request_urls_.empty() && !chunk_pending_to_write_)
              UpdateFinished(true);
          }
          break;
        case UPDATE_REQUEST:
        case BACKUP_UPDATE_REQUEST:
          if (chunk_request_urls_.empty() && parsed_ok) {
            // We are up to date since the servers gave us nothing new, so we
            // are done with this update cycle.
            UpdateFinished(true);
          }
          break;
        case NO_REQUEST:
          // This can happen if HandleServiceResponse fails above.
          break;
        default:
          NOTREACHED();
          break;
      }
    } else {
      if (status.status() == net::URLRequestStatus::FAILED) {
        DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                 << " failed with error: " << status.error();
      } else {
        DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                 << " failed with error: " << response_code;
      }
      if (request_type_ == CHUNK_REQUEST) {
        // The SafeBrowsing service error, or very bad response code: back off.
        chunk_request_urls_.clear();
      } else if (request_type_ == UPDATE_REQUEST) {
        BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
        if (status.is_success()) {
          backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
        } else {
          switch (status.error()) {
            case net::ERR_INTERNET_DISCONNECTED:
            case net::ERR_NETWORK_CHANGED:
              backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
              break;
            default:
              backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
              break;
          }
        }
        if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
            IssueBackupUpdateRequest(backup_update_reason)) {
          return;
        }
      }
      UpdateFinished(false);
    }
  }

  // Get the next chunk if available.
  IssueChunkRequest();
}

bool SafeBrowsingProtocolManager::HandleServiceResponse(
    const GURL& url, const char* data, size_t length) {
  DCHECK(CalledOnValidThread());

  switch (request_type_) {
    case UPDATE_REQUEST:
    case BACKUP_UPDATE_REQUEST: {
      size_t next_update_sec = 0;
      bool reset = false;
      scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
          new std::vector<SBChunkDelete>);
      std::vector<ChunkUrl> chunk_urls;
      if (!safe_browsing::ParseUpdate(data, length, &next_update_sec, &reset,
                                      chunk_deletes.get(), &chunk_urls)) {
        return false;
      }

      base::TimeDelta next_update_interval =
          base::TimeDelta::FromSeconds(next_update_sec);
      last_update_ = Time::Now();

      if (update_state_ == FIRST_REQUEST)
        update_state_ = SECOND_REQUEST;
      else if (update_state_ == SECOND_REQUEST)
        update_state_ = NORMAL_REQUEST;

      // New time for the next update.
      if (next_update_interval > base::TimeDelta()) {
        next_update_interval_ = next_update_interval;
      } else if (update_state_ == SECOND_REQUEST) {
        next_update_interval_ = base::TimeDelta::FromSeconds(
            base::RandInt(15, 45));
      }

      // New chunks to download.
      if (!chunk_urls.empty()) {
        UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
        for (size_t i = 0; i < chunk_urls.size(); ++i)
          chunk_request_urls_.push_back(chunk_urls[i]);
      }

      // Handle the case where the SafeBrowsing service tells us to dump our
      // database.
      if (reset) {
        delegate_->ResetDatabase();
        return true;
      }

      // Chunks to delete from our storage.
      if (!chunk_deletes->empty())
        delegate_->DeleteChunks(chunk_deletes.Pass());

      break;
    }
    case CHUNK_REQUEST: {
      UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
                          base::Time::Now() - chunk_request_start_);

      const ChunkUrl chunk_url = chunk_request_urls_.front();
      scoped_ptr<ScopedVector<SBChunkData> >
          chunks(new ScopedVector<SBChunkData>);
      UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
      update_size_ += length;
      if (!safe_browsing::ParseChunk(data, length, chunks.get()))
        return false;

      // Chunks to add to storage. Pass ownership of |chunks|.
      if (!chunks->empty()) {
        chunk_pending_to_write_ = true;
        delegate_->AddChunks(
            chunk_url.list_name, chunks.Pass(),
            base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete,
                       base::Unretained(this)));
      }

      break;
    }

    default:
      return false;
  }

  return true;
}

void SafeBrowsingProtocolManager::Initialize() {
  DCHECK(CalledOnValidThread());
  // TODO(cbentzel): Remove ScopedTracker below once crbug.com/483689 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "483689 SafeBrowsingProtocolManager::Initialize"));
  // Don't want to hit the safe browsing servers on build/chrome bots.
  scoped_ptr<base::Environment> env(base::Environment::Create());
  if (env->HasVar(env_vars::kHeadless))
    return;
  ScheduleNextUpdate(false /* no back off */);
}

void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
  DCHECK(CalledOnValidThread());
  if (disable_auto_update_) {
    // Unschedule any current timer.
    update_timer_.Stop();
    return;
  }
  // Reschedule with the new update.
  base::TimeDelta next_update_interval = GetNextUpdateInterval(back_off);
  ForceScheduleNextUpdate(next_update_interval);
}

void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
    base::TimeDelta interval) {
  DCHECK(CalledOnValidThread());
  DCHECK(interval >= base::TimeDelta());
  // Unschedule any current timer.
  update_timer_.Stop();
  update_timer_.Start(FROM_HERE, interval, this,
                      &SafeBrowsingProtocolManager::GetNextUpdate);
}

// According to section 5 of the SafeBrowsing protocol specification, we must
// back off after a certain number of errors. We only change |next_update_sec_|
// when we receive a response from the SafeBrowsing service.
base::TimeDelta SafeBrowsingProtocolManager::GetNextUpdateInterval(
    bool back_off) {
  DCHECK(CalledOnValidThread());
  DCHECK(next_update_interval_ > base::TimeDelta());
  base::TimeDelta next = next_update_interval_;
  if (back_off) {
    next = GetNextBackOffInterval(&update_error_count_,
                                  &update_back_off_mult_);
  } else {
    // Successful response means error reset.
    update_error_count_ = 0;
    update_back_off_mult_ = 1;
  }
  return next;
}

base::TimeDelta SafeBrowsingProtocolManager::GetNextBackOffInterval(
    size_t* error_count, size_t* multiplier) const {
  DCHECK(CalledOnValidThread());
  DCHECK(multiplier && error_count);
  (*error_count)++;
  if (*error_count > 1 && *error_count < 6) {
    base::TimeDelta next = base::TimeDelta::FromMinutes(
        *multiplier * (1 + back_off_fuzz_) * 30);
    *multiplier *= 2;
    if (*multiplier > kSbMaxBackOff)
      *multiplier = kSbMaxBackOff;
    return next;
  }
  if (*error_count >= 6)
    return base::TimeDelta::FromHours(8);
  return base::TimeDelta::FromMinutes(1);
}
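
// Illustrative backoff schedule implied by the helper above (values assume
// back_off_fuzz_ == 0.5; the fuzz is random per client):
//   error 1         -> wait 1 minute
//   error 2         -> 1 * (1 + 0.5) * 30 == 45 minutes, multiplier -> 2
//   error 3         -> 2 * 1.5 * 30 == 90 minutes, multiplier -> 4
//   error 4         -> 4 * 1.5 * 30 == 180 minutes, multiplier -> 8 (capped)
//   error 5         -> 8 * 1.5 * 30 == 360 minutes, multiplier stays 8
//   errors 6 and up -> 8 hours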

// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              other hand, this request only occurs every ~20-30 minutes, so
//              there isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  DCHECK(CalledOnValidThread());
  request_type_ = UPDATE_REQUEST;
  delegate_->UpdateStarted();
  delegate_->GetChunks(
      base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete,
                 base::Unretained(this)));
}

// The backup request can run immediately since the chunks have already been
// retrieved from the DB.
bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
    BackupUpdateReason backup_update_reason) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(backup_update_reason >= 0 &&
         backup_update_reason < BACKUP_UPDATE_REASON_MAX);
  if (backup_url_prefixes_[backup_update_reason].empty())
    return false;
  request_type_ = BACKUP_UPDATE_REQUEST;
  backup_update_reason_ = backup_update_reason;

  GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
  request_ = net::URLFetcher::Create(url_fetcher_id_++, backup_update_url,
                                     net::URLFetcher::POST, this);
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);

  return true;
}

void SafeBrowsingProtocolManager::IssueChunkRequest() {
  DCHECK(CalledOnValidThread());
  // We are only allowed to have one request outstanding at any time. Also,
  // don't get the next url until the previous one has been written to disk so
  // that we don't use too much memory.
  if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
    return;

  ChunkUrl next_chunk = chunk_request_urls_.front();
  DCHECK(!next_chunk.url.empty());
  GURL chunk_url = NextChunkUrl(next_chunk.url);
  request_type_ = CHUNK_REQUEST;
  request_ = net::URLFetcher::Create(url_fetcher_id_++, chunk_url,
                                     net::URLFetcher::GET, this);
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  chunk_request_start_ = base::Time::Now();
  request_->Start();
}

void SafeBrowsingProtocolManager::OnGetChunksComplete(
    const std::vector<SBListChunkRanges>& lists,
    bool database_error,
    bool is_extended_reporting) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(update_list_data_.empty());
  if (database_error) {
    // The update was not successful, but don't back off.
    UpdateFinished(false, false);
    return;
  }

  // Format our stored chunks:
  bool found_malware = false;
  bool found_phishing = false;
  for (size_t i = 0; i < lists.size(); ++i) {
    update_list_data_.append(safe_browsing::FormatList(lists[i]));
    if (lists[i].name == safe_browsing_util::kPhishingList)
      found_phishing = true;

    if (lists[i].name == safe_browsing_util::kMalwareList)
      found_malware = true;
  }

  // If we have an empty database, let the server know we want data for these
  // lists.
  // TODO(shess): These cases never happen because the database fills in the
  // lists in GetChunks(). Refactor the unit tests so that this code can be
  // removed.
  if (!found_phishing) {
    update_list_data_.append(safe_browsing::FormatList(
        SBListChunkRanges(safe_browsing_util::kPhishingList)));
  }
  if (!found_malware) {
    update_list_data_.append(safe_browsing::FormatList(
        SBListChunkRanges(safe_browsing_util::kMalwareList)));
  }

  // Large requests are (probably) a sign of database corruption.
  // Record stats to inform decisions about whether to automate
  // deletion of such databases. http://crbug.com/120219
  UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_.size());

  GURL update_url = UpdateUrl(is_extended_reporting);
  request_ = net::URLFetcher::Create(url_fetcher_id_++, update_url,
                                     net::URLFetcher::POST, this);
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);
}

// If we haven't heard back from the server with an update response, this method
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
  DCHECK(CalledOnValidThread());
  DCHECK(request_type_ == UPDATE_REQUEST ||
         request_type_ == BACKUP_UPDATE_REQUEST);
  request_.reset();
  if (request_type_ == UPDATE_REQUEST &&
      IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
    return;
  }
  UpdateFinished(false);
}

void SafeBrowsingProtocolManager::OnAddChunksComplete() {
  DCHECK(CalledOnValidThread());
  chunk_pending_to_write_ = false;

  if (chunk_request_urls_.empty()) {
    UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
    UpdateFinished(true);
  } else {
    IssueChunkRequest();
  }
}

void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
  DCHECK(CalledOnValidThread());
  base::TimeDelta next = GetNextBackOffInterval(
      &gethash_error_count_, &gethash_back_off_mult_);
  next_gethash_time_ = now + next;
}
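
// GetHash backoff thus follows the same curve as updates: after the first
// error, requests are deferred by 1 minute; repeated errors grow the wait
// toward the 8-hour ceiling, and GetFullHash() answers with empty results
// (treating the page as safe) until next_gethash_time_ passes.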

void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
  UpdateFinished(success, !success);
}

void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
  DCHECK(CalledOnValidThread());
#if defined(OS_ANDROID)
  if (app_in_foreground_)
    UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeForeground", update_size_);
  else
    UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeBackground", update_size_);
#endif
  UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
  update_size_ = 0;
  bool update_success = success || request_type_ == CHUNK_REQUEST;
  if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
    RecordUpdateResult(
        update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
  } else {
    UpdateResult update_result = static_cast<UpdateResult>(
        UPDATE_RESULT_BACKUP_START +
        (static_cast<int>(backup_update_reason_) * 2) +
        update_success);
    RecordUpdateResult(update_result);
  }
  backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
  request_type_ = NO_REQUEST;
  update_list_data_.clear();
  delegate_->UpdateFinished(success);
  ScheduleNextUpdate(back_off);
}
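
// Worked example of the bucket arithmetic above: a backup update issued for a
// network error (BACKUP_UPDATE_REASON_NETWORK == 2, assuming the enum orders
// CONNECT, HTTP, NETWORK to match UpdateResult) that succeeds records
// UPDATE_RESULT_BACKUP_START + 2 * 2 + 1 == UPDATE_RESULT_BACKUP_NETWORK_SUCCESS.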

GURL SafeBrowsingProtocolManager::UpdateUrl(bool is_extended_reporting) const {
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      url_prefix_, "downloads", client_name_, version_, additional_query_,
      is_extended_reporting);
  return GURL(url);
}

GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
    BackupUpdateReason backup_update_reason) const {
  DCHECK(backup_update_reason >= 0 &&
         backup_update_reason < BACKUP_UPDATE_REASON_MAX);
  DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
      version_, additional_query_);
  return GURL(url);
}

GURL SafeBrowsingProtocolManager::GetHashUrl(bool is_extended_reporting) const {
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      url_prefix_, "gethash", client_name_, version_, additional_query_,
      is_extended_reporting);
  return GURL(url);
}

GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
  DCHECK(CalledOnValidThread());
  std::string next_url;
  if (!base::StartsWith(url, "http://",
                        base::CompareCase::INSENSITIVE_ASCII) &&
      !base::StartsWith(url, "https://",
                        base::CompareCase::INSENSITIVE_ASCII)) {
    // Use https if we updated via https, otherwise http (useful for testing).
    if (base::StartsWith(url_prefix_, "https://",
                         base::CompareCase::INSENSITIVE_ASCII))
      next_url.append("https://");
    else
      next_url.append("http://");
    next_url.append(url);
  } else {
    next_url = url;
  }
  if (!additional_query_.empty()) {
    if (next_url.find("?") != std::string::npos) {
      next_url.append("&");
    } else {
      next_url.append("?");
    }
    next_url.append(additional_query_);
  }
  return GURL(next_url);
}
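
// Example (illustrative values): given a scheme-less chunk url such as
// "cache.example.com/chunk_1.dat" and a |url_prefix_| beginning with
// "https://", NextChunkUrl yields "https://cache.example.com/chunk_1.dat",
// with "?<additional_query_>" (or "&<additional_query_>" if the url already
// has a query) appended when an additional query is configured.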

SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
    : callback(),
      is_download(false) {
}

SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
    FullHashCallback callback, bool is_download)
    : callback(callback),
      is_download(is_download) {
}

SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
}

SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {
}