[sql] Remove _HAS_EXCEPTIONS=0 from build info.
[chromium-blink-merge.git] / chrome / browser / safe_browsing / protocol_manager.cc
blob947653bea3abf301813b467d739a92c15124f9c4
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "chrome/browser/safe_browsing/protocol_manager.h"
7 #include "base/environment.h"
8 #include "base/logging.h"
9 #include "base/memory/scoped_vector.h"
10 #include "base/metrics/histogram.h"
11 #include "base/profiler/scoped_tracker.h"
12 #include "base/rand_util.h"
13 #include "base/stl_util.h"
14 #include "base/strings/string_util.h"
15 #include "base/strings/stringprintf.h"
16 #include "base/timer/timer.h"
17 #include "chrome/browser/safe_browsing/protocol_parser.h"
18 #include "chrome/common/env_vars.h"
19 #include "google_apis/google_api_keys.h"
20 #include "net/base/escape.h"
21 #include "net/base/load_flags.h"
22 #include "net/base/net_errors.h"
23 #include "net/url_request/url_fetcher.h"
24 #include "net/url_request/url_request_context_getter.h"
25 #include "net/url_request/url_request_status.h"
27 #if defined(OS_ANDROID)
28 #include "net/base/network_change_notifier.h"
29 #endif
31 using base::Time;
32 using base::TimeDelta;
34 namespace {
36 // UpdateResult indicates what happened with the primary and/or backup update
37 // requests. The ordering of the values must stay the same for UMA consistency,
38 // and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
39 enum UpdateResult {
40 UPDATE_RESULT_FAIL,
41 UPDATE_RESULT_SUCCESS,
42 UPDATE_RESULT_BACKUP_CONNECT_FAIL,
43 UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
44 UPDATE_RESULT_BACKUP_HTTP_FAIL,
45 UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
46 UPDATE_RESULT_BACKUP_NETWORK_FAIL,
47 UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
48 UPDATE_RESULT_MAX,
49 UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
52 void RecordUpdateResult(UpdateResult result) {
53 DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
54 UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
57 } // namespace
// Minimum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMin = 60;

// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMax = 300;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 30;

// Maximum back off multiplier applied to the update/gethash retry intervals.
static const size_t kSbMaxBackOff = 8;
71 // The default SBProtocolManagerFactory.
72 class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory {
73 public:
74 SBProtocolManagerFactoryImpl() { }
75 ~SBProtocolManagerFactoryImpl() override {}
76 SafeBrowsingProtocolManager* CreateProtocolManager(
77 SafeBrowsingProtocolManagerDelegate* delegate,
78 net::URLRequestContextGetter* request_context_getter,
79 const SafeBrowsingProtocolConfig& config) override {
80 return new SafeBrowsingProtocolManager(
81 delegate, request_context_getter, config);
83 private:
84 DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl);
87 // SafeBrowsingProtocolManager implementation ----------------------------------
89 // static
90 SBProtocolManagerFactory* SafeBrowsingProtocolManager::factory_ = NULL;
92 // static
93 SafeBrowsingProtocolManager* SafeBrowsingProtocolManager::Create(
94 SafeBrowsingProtocolManagerDelegate* delegate,
95 net::URLRequestContextGetter* request_context_getter,
96 const SafeBrowsingProtocolConfig& config) {
97 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/483689 is fixed.
98 tracked_objects::ScopedTracker tracking_profile(
99 FROM_HERE_WITH_EXPLICIT_FUNCTION(
100 "483689 SafeBrowsingProtocolManager::Create"));
101 if (!factory_)
102 factory_ = new SBProtocolManagerFactoryImpl();
103 return factory_->CreateProtocolManager(
104 delegate, request_context_getter, config);
107 SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
108 SafeBrowsingProtocolManagerDelegate* delegate,
109 net::URLRequestContextGetter* request_context_getter,
110 const SafeBrowsingProtocolConfig& config)
111 : delegate_(delegate),
112 request_type_(NO_REQUEST),
113 update_error_count_(0),
114 gethash_error_count_(0),
115 update_back_off_mult_(1),
116 gethash_back_off_mult_(1),
117 next_update_interval_(base::TimeDelta::FromSeconds(
118 base::RandInt(kSbTimerStartIntervalSecMin,
119 kSbTimerStartIntervalSecMax))),
120 update_state_(FIRST_REQUEST),
121 chunk_pending_to_write_(false),
122 version_(config.version),
123 update_size_(0),
124 client_name_(config.client_name),
125 request_context_getter_(request_context_getter),
126 url_prefix_(config.url_prefix),
127 backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
128 disable_auto_update_(config.disable_auto_update),
129 #if defined(OS_ANDROID)
130 disable_connection_check_(config.disable_connection_check),
131 #endif
132 url_fetcher_id_(0),
133 app_in_foreground_(true) {
134 DCHECK(!url_prefix_.empty());
136 backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
137 config.backup_connect_error_url_prefix;
138 backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
139 config.backup_http_error_url_prefix;
140 backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
141 config.backup_network_error_url_prefix;
143 // Set the backoff multiplier fuzz to a random value between 0 and 1.
144 back_off_fuzz_ = static_cast<float>(base::RandDouble());
145 if (version_.empty())
146 version_ = SafeBrowsingProtocolManagerHelper::Version();
149 // static
150 void SafeBrowsingProtocolManager::RecordGetHashResult(
151 bool is_download, ResultType result_type) {
152 if (is_download) {
153 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type,
154 GET_HASH_RESULT_MAX);
155 } else {
156 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type,
157 GET_HASH_RESULT_MAX);
161 bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
162 return update_timer_.IsRunning();
165 SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
166 // Delete in-progress SafeBrowsing requests.
167 STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
168 hash_requests_.end());
169 hash_requests_.clear();
172 // We can only have one update or chunk request outstanding, but there may be
173 // multiple GetHash requests pending since we don't want to serialize them and
174 // slow down the user.
175 void SafeBrowsingProtocolManager::GetFullHash(
176 const std::vector<SBPrefix>& prefixes,
177 FullHashCallback callback,
178 bool is_download) {
179 DCHECK(CalledOnValidThread());
180 // If we are in GetHash backoff, we need to check if we're past the next
181 // allowed time. If we are, we can proceed with the request. If not, we are
182 // required to return empty results (i.e. treat the page as safe).
183 if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
184 RecordGetHashResult(is_download, GET_HASH_BACKOFF_ERROR);
185 std::vector<SBFullHashResult> full_hashes;
186 callback.Run(full_hashes, base::TimeDelta());
187 return;
189 GURL gethash_url = GetHashUrl();
190 net::URLFetcher* fetcher =
191 net::URLFetcher::Create(url_fetcher_id_++, gethash_url,
192 net::URLFetcher::POST, this).release();
193 hash_requests_[fetcher] = FullHashDetails(callback, is_download);
195 const std::string get_hash = safe_browsing::FormatGetHash(prefixes);
197 fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE);
198 fetcher->SetRequestContext(request_context_getter_.get());
199 fetcher->SetUploadData("text/plain", get_hash);
200 fetcher->Start();
203 void SafeBrowsingProtocolManager::GetNextUpdate() {
204 DCHECK(CalledOnValidThread());
205 if (request_.get() || request_type_ != NO_REQUEST)
206 return;
208 #if defined(OS_ANDROID)
209 if (!disable_connection_check_) {
210 net::NetworkChangeNotifier::ConnectionType type =
211 net::NetworkChangeNotifier::GetConnectionType();
212 if (type != net::NetworkChangeNotifier::CONNECTION_WIFI) {
213 ScheduleNextUpdate(false /* no back off */);
214 return;
217 #endif
219 IssueUpdateRequest();
222 // net::URLFetcherDelegate implementation ----------------------------------
224 // All SafeBrowsing request responses are handled here.
225 // TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
226 // chunk should retry the download and parse of that chunk (and
227 // what back off / how many times to try), and if that effects the
228 // update back off. For now, a failed parse of the chunk means we
229 // drop it. This isn't so bad because the next UPDATE_REQUEST we
230 // do will report all the chunks we have. If that chunk is still
231 // required, the SafeBrowsing servers will tell us to get it again.
232 void SafeBrowsingProtocolManager::OnURLFetchComplete(
233 const net::URLFetcher* source) {
234 DCHECK(CalledOnValidThread());
235 scoped_ptr<const net::URLFetcher> fetcher;
237 HashRequests::iterator it = hash_requests_.find(source);
238 if (it != hash_requests_.end()) {
239 // GetHash response.
240 fetcher.reset(it->first);
241 const FullHashDetails& details = it->second;
242 std::vector<SBFullHashResult> full_hashes;
243 base::TimeDelta cache_lifetime;
244 if (source->GetStatus().is_success() &&
245 (source->GetResponseCode() == 200 ||
246 source->GetResponseCode() == 204)) {
247 // For tracking our GetHash false positive (204) rate, compared to real
248 // (200) responses.
249 if (source->GetResponseCode() == 200)
250 RecordGetHashResult(details.is_download, GET_HASH_STATUS_200);
251 else
252 RecordGetHashResult(details.is_download, GET_HASH_STATUS_204);
254 gethash_error_count_ = 0;
255 gethash_back_off_mult_ = 1;
256 std::string data;
257 source->GetResponseAsString(&data);
258 if (!safe_browsing::ParseGetHash(
259 data.data(), data.length(), &cache_lifetime, &full_hashes)) {
260 full_hashes.clear();
261 RecordGetHashResult(details.is_download, GET_HASH_PARSE_ERROR);
262 // TODO(cbentzel): Should cache_lifetime be set to 0 here? (See
263 // http://crbug.com/360232.)
265 } else {
266 HandleGetHashError(Time::Now());
267 if (source->GetStatus().status() == net::URLRequestStatus::FAILED) {
268 RecordGetHashResult(details.is_download, GET_HASH_NETWORK_ERROR);
269 DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
270 << " failed with error: " << source->GetStatus().error();
271 } else {
272 RecordGetHashResult(details.is_download, GET_HASH_HTTP_ERROR);
273 DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
274 << " failed with error: " << source->GetResponseCode();
278 // Invoke the callback with full_hashes, even if there was a parse error or
279 // an error response code (in which case full_hashes will be empty). The
280 // caller can't be blocked indefinitely.
281 details.callback.Run(full_hashes, cache_lifetime);
283 hash_requests_.erase(it);
284 } else {
285 // Update or chunk response.
286 fetcher.reset(request_.release());
288 if (request_type_ == UPDATE_REQUEST ||
289 request_type_ == BACKUP_UPDATE_REQUEST) {
290 if (!fetcher.get()) {
291 // We've timed out waiting for an update response, so we've cancelled
292 // the update request and scheduled a new one. Ignore this response.
293 return;
296 // Cancel the update response timeout now that we have the response.
297 timeout_timer_.Stop();
300 net::URLRequestStatus status = source->GetStatus();
301 if (status.is_success() && source->GetResponseCode() == 200) {
302 // We have data from the SafeBrowsing service.
303 std::string data;
304 source->GetResponseAsString(&data);
306 // TODO(shess): Cleanup the flow of this code so that |parsed_ok| can be
307 // removed or omitted.
308 const bool parsed_ok = HandleServiceResponse(
309 source->GetURL(), data.data(), data.length());
310 if (!parsed_ok) {
311 DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
312 << " failed parse.";
313 chunk_request_urls_.clear();
314 if (request_type_ == UPDATE_REQUEST &&
315 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
316 return;
318 UpdateFinished(false);
321 switch (request_type_) {
322 case CHUNK_REQUEST:
323 if (parsed_ok) {
324 chunk_request_urls_.pop_front();
325 if (chunk_request_urls_.empty() && !chunk_pending_to_write_)
326 UpdateFinished(true);
328 break;
329 case UPDATE_REQUEST:
330 case BACKUP_UPDATE_REQUEST:
331 if (chunk_request_urls_.empty() && parsed_ok) {
332 // We are up to date since the servers gave us nothing new, so we
333 // are done with this update cycle.
334 UpdateFinished(true);
336 break;
337 case NO_REQUEST:
338 // This can happen if HandleServiceResponse fails above.
339 break;
340 default:
341 NOTREACHED();
342 break;
344 } else {
345 if (status.status() == net::URLRequestStatus::FAILED) {
346 DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
347 << " failed with error: " << source->GetStatus().error();
348 } else {
349 DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
350 << " failed with error: " << source->GetResponseCode();
352 if (request_type_ == CHUNK_REQUEST) {
353 // The SafeBrowsing service error, or very bad response code: back off.
354 chunk_request_urls_.clear();
355 } else if (request_type_ == UPDATE_REQUEST) {
356 BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
357 if (status.is_success()) {
358 backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
359 } else {
360 switch (status.error()) {
361 case net::ERR_INTERNET_DISCONNECTED:
362 case net::ERR_NETWORK_CHANGED:
363 backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
364 break;
365 default:
366 backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
367 break;
370 if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
371 IssueBackupUpdateRequest(backup_update_reason)) {
372 return;
375 UpdateFinished(false);
379 // Get the next chunk if available.
380 IssueChunkRequest();
383 bool SafeBrowsingProtocolManager::HandleServiceResponse(
384 const GURL& url, const char* data, size_t length) {
385 DCHECK(CalledOnValidThread());
387 switch (request_type_) {
388 case UPDATE_REQUEST:
389 case BACKUP_UPDATE_REQUEST: {
390 size_t next_update_sec = 0;
391 bool reset = false;
392 scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
393 new std::vector<SBChunkDelete>);
394 std::vector<ChunkUrl> chunk_urls;
395 if (!safe_browsing::ParseUpdate(data, length, &next_update_sec, &reset,
396 chunk_deletes.get(), &chunk_urls)) {
397 return false;
400 base::TimeDelta next_update_interval =
401 base::TimeDelta::FromSeconds(next_update_sec);
402 last_update_ = Time::Now();
404 if (update_state_ == FIRST_REQUEST)
405 update_state_ = SECOND_REQUEST;
406 else if (update_state_ == SECOND_REQUEST)
407 update_state_ = NORMAL_REQUEST;
409 // New time for the next update.
410 if (next_update_interval > base::TimeDelta()) {
411 next_update_interval_ = next_update_interval;
412 } else if (update_state_ == SECOND_REQUEST) {
413 next_update_interval_ = base::TimeDelta::FromSeconds(
414 base::RandInt(15, 45));
417 // New chunks to download.
418 if (!chunk_urls.empty()) {
419 UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
420 for (size_t i = 0; i < chunk_urls.size(); ++i)
421 chunk_request_urls_.push_back(chunk_urls[i]);
424 // Handle the case were the SafeBrowsing service tells us to dump our
425 // database.
426 if (reset) {
427 delegate_->ResetDatabase();
428 return true;
431 // Chunks to delete from our storage.
432 if (!chunk_deletes->empty())
433 delegate_->DeleteChunks(chunk_deletes.Pass());
435 break;
437 case CHUNK_REQUEST: {
438 UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
439 base::Time::Now() - chunk_request_start_);
441 const ChunkUrl chunk_url = chunk_request_urls_.front();
442 scoped_ptr<ScopedVector<SBChunkData> >
443 chunks(new ScopedVector<SBChunkData>);
444 UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
445 update_size_ += length;
446 if (!safe_browsing::ParseChunk(data, length, chunks.get()))
447 return false;
449 // Chunks to add to storage. Pass ownership of |chunks|.
450 if (!chunks->empty()) {
451 chunk_pending_to_write_ = true;
452 delegate_->AddChunks(
453 chunk_url.list_name, chunks.Pass(),
454 base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete,
455 base::Unretained(this)));
458 break;
461 default:
462 return false;
465 return true;
468 void SafeBrowsingProtocolManager::Initialize() {
469 DCHECK(CalledOnValidThread());
470 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/483689 is fixed.
471 tracked_objects::ScopedTracker tracking_profile(
472 FROM_HERE_WITH_EXPLICIT_FUNCTION(
473 "483689 SafeBrowsingProtocolManager::Initialize"));
474 // Don't want to hit the safe browsing servers on build/chrome bots.
475 scoped_ptr<base::Environment> env(base::Environment::Create());
476 if (env->HasVar(env_vars::kHeadless))
477 return;
478 ScheduleNextUpdate(false /* no back off */);
481 void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
482 DCHECK(CalledOnValidThread());
483 if (disable_auto_update_) {
484 // Unschedule any current timer.
485 update_timer_.Stop();
486 return;
488 // Reschedule with the new update.
489 base::TimeDelta next_update_interval = GetNextUpdateInterval(back_off);
490 ForceScheduleNextUpdate(next_update_interval);
493 void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
494 base::TimeDelta interval) {
495 DCHECK(CalledOnValidThread());
496 DCHECK(interval >= base::TimeDelta());
497 // Unschedule any current timer.
498 update_timer_.Stop();
499 update_timer_.Start(FROM_HERE, interval, this,
500 &SafeBrowsingProtocolManager::GetNextUpdate);
503 // According to section 5 of the SafeBrowsing protocol specification, we must
504 // back off after a certain number of errors. We only change |next_update_sec_|
505 // when we receive a response from the SafeBrowsing service.
506 base::TimeDelta SafeBrowsingProtocolManager::GetNextUpdateInterval(
507 bool back_off) {
508 DCHECK(CalledOnValidThread());
509 DCHECK(next_update_interval_ > base::TimeDelta());
510 base::TimeDelta next = next_update_interval_;
511 if (back_off) {
512 next = GetNextBackOffInterval(&update_error_count_, &update_back_off_mult_);
513 } else {
514 // Successful response means error reset.
515 update_error_count_ = 0;
516 update_back_off_mult_ = 1;
518 return next;
521 base::TimeDelta SafeBrowsingProtocolManager::GetNextBackOffInterval(
522 size_t* error_count, size_t* multiplier) const {
523 DCHECK(CalledOnValidThread());
524 DCHECK(multiplier && error_count);
525 (*error_count)++;
526 if (*error_count > 1 && *error_count < 6) {
527 base::TimeDelta next = base::TimeDelta::FromMinutes(
528 *multiplier * (1 + back_off_fuzz_) * 30);
529 *multiplier *= 2;
530 if (*multiplier > kSbMaxBackOff)
531 *multiplier = kSbMaxBackOff;
532 return next;
534 if (*error_count >= 6)
535 return base::TimeDelta::FromHours(8);
536 return base::TimeDelta::FromMinutes(1);
539 // This request requires getting a list of all the chunks for each list from the
540 // database asynchronously. The request will be issued when we're called back in
541 // OnGetChunksComplete.
542 // TODO(paulg): We should get this at start up and maintain a ChunkRange cache
543 // to avoid hitting the database with each update request. On the
544 // otherhand, this request will only occur ~20-30 minutes so there
545 // isn't that much overhead. Measure!
546 void SafeBrowsingProtocolManager::IssueUpdateRequest() {
547 DCHECK(CalledOnValidThread());
548 request_type_ = UPDATE_REQUEST;
549 delegate_->UpdateStarted();
550 delegate_->GetChunks(
551 base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete,
552 base::Unretained(this)));
555 // The backup request can run immediately since the chunks have already been
556 // retrieved from the DB.
557 bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
558 BackupUpdateReason backup_update_reason) {
559 DCHECK(CalledOnValidThread());
560 DCHECK_EQ(request_type_, UPDATE_REQUEST);
561 DCHECK(backup_update_reason >= 0 &&
562 backup_update_reason < BACKUP_UPDATE_REASON_MAX);
563 if (backup_url_prefixes_[backup_update_reason].empty())
564 return false;
565 request_type_ = BACKUP_UPDATE_REQUEST;
566 backup_update_reason_ = backup_update_reason;
568 GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
569 request_ = net::URLFetcher::Create(url_fetcher_id_++, backup_update_url,
570 net::URLFetcher::POST, this);
571 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
572 request_->SetRequestContext(request_context_getter_.get());
573 request_->SetUploadData("text/plain", update_list_data_);
574 request_->Start();
576 // Begin the update request timeout.
577 timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
578 this,
579 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
581 return true;
584 void SafeBrowsingProtocolManager::IssueChunkRequest() {
585 DCHECK(CalledOnValidThread());
586 // We are only allowed to have one request outstanding at any time. Also,
587 // don't get the next url until the previous one has been written to disk so
588 // that we don't use too much memory.
589 if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
590 return;
592 ChunkUrl next_chunk = chunk_request_urls_.front();
593 DCHECK(!next_chunk.url.empty());
594 GURL chunk_url = NextChunkUrl(next_chunk.url);
595 request_type_ = CHUNK_REQUEST;
596 request_ = net::URLFetcher::Create(url_fetcher_id_++, chunk_url,
597 net::URLFetcher::GET, this);
598 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
599 request_->SetRequestContext(request_context_getter_.get());
600 chunk_request_start_ = base::Time::Now();
601 request_->Start();
604 void SafeBrowsingProtocolManager::OnGetChunksComplete(
605 const std::vector<SBListChunkRanges>& lists, bool database_error) {
606 DCHECK(CalledOnValidThread());
607 DCHECK_EQ(request_type_, UPDATE_REQUEST);
608 DCHECK(update_list_data_.empty());
609 if (database_error) {
610 // The update was not successful, but don't back off.
611 UpdateFinished(false, false);
612 return;
615 // Format our stored chunks:
616 bool found_malware = false;
617 bool found_phishing = false;
618 for (size_t i = 0; i < lists.size(); ++i) {
619 update_list_data_.append(safe_browsing::FormatList(lists[i]));
620 if (lists[i].name == safe_browsing_util::kPhishingList)
621 found_phishing = true;
623 if (lists[i].name == safe_browsing_util::kMalwareList)
624 found_malware = true;
627 // If we have an empty database, let the server know we want data for these
628 // lists.
629 // TODO(shess): These cases never happen because the database fills in the
630 // lists in GetChunks(). Refactor the unit tests so that this code can be
631 // removed.
632 if (!found_phishing) {
633 update_list_data_.append(safe_browsing::FormatList(
634 SBListChunkRanges(safe_browsing_util::kPhishingList)));
636 if (!found_malware) {
637 update_list_data_.append(safe_browsing::FormatList(
638 SBListChunkRanges(safe_browsing_util::kMalwareList)));
641 // Large requests are (probably) a sign of database corruption.
642 // Record stats to inform decisions about whether to automate
643 // deletion of such databases. http://crbug.com/120219
644 UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_.size());
646 GURL update_url = UpdateUrl();
647 request_ = net::URLFetcher::Create(url_fetcher_id_++, update_url,
648 net::URLFetcher::POST, this);
649 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
650 request_->SetRequestContext(request_context_getter_.get());
651 request_->SetUploadData("text/plain", update_list_data_);
652 request_->Start();
654 // Begin the update request timeout.
655 timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
656 this,
657 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
660 // If we haven't heard back from the server with an update response, this method
661 // will run. Close the current update session and schedule another update.
662 void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
663 DCHECK(CalledOnValidThread());
664 DCHECK(request_type_ == UPDATE_REQUEST ||
665 request_type_ == BACKUP_UPDATE_REQUEST);
666 request_.reset();
667 if (request_type_ == UPDATE_REQUEST &&
668 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
669 return;
671 UpdateFinished(false);
674 void SafeBrowsingProtocolManager::OnAddChunksComplete() {
675 DCHECK(CalledOnValidThread());
676 chunk_pending_to_write_ = false;
678 if (chunk_request_urls_.empty()) {
679 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
680 UpdateFinished(true);
681 } else {
682 IssueChunkRequest();
686 void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
687 DCHECK(CalledOnValidThread());
688 base::TimeDelta next = GetNextBackOffInterval(
689 &gethash_error_count_, &gethash_back_off_mult_);
690 next_gethash_time_ = now + next;
693 void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
694 UpdateFinished(success, !success);
697 void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
698 DCHECK(CalledOnValidThread());
699 #if defined(OS_ANDROID)
700 if (app_in_foreground_)
701 UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeForeground", update_size_);
702 else
703 UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeBackground", update_size_);
704 #endif
705 UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
706 update_size_ = 0;
707 bool update_success = success || request_type_ == CHUNK_REQUEST;
708 if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
709 RecordUpdateResult(
710 update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
711 } else {
712 UpdateResult update_result = static_cast<UpdateResult>(
713 UPDATE_RESULT_BACKUP_START +
714 (static_cast<int>(backup_update_reason_) * 2) +
715 update_success);
716 RecordUpdateResult(update_result);
718 backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
719 request_type_ = NO_REQUEST;
720 update_list_data_.clear();
721 delegate_->UpdateFinished(success);
722 ScheduleNextUpdate(back_off);
725 GURL SafeBrowsingProtocolManager::UpdateUrl() const {
726 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
727 url_prefix_, "downloads", client_name_, version_, additional_query_);
728 return GURL(url);
731 GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
732 BackupUpdateReason backup_update_reason) const {
733 DCHECK(backup_update_reason >= 0 &&
734 backup_update_reason < BACKUP_UPDATE_REASON_MAX);
735 DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
736 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
737 backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
738 version_, additional_query_);
739 return GURL(url);
742 GURL SafeBrowsingProtocolManager::GetHashUrl() const {
743 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
744 url_prefix_, "gethash", client_name_, version_, additional_query_);
745 return GURL(url);
748 GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
749 DCHECK(CalledOnValidThread());
750 std::string next_url;
751 if (!base::StartsWith(url, "http://",
752 base::CompareCase::INSENSITIVE_ASCII) &&
753 !base::StartsWith(url, "https://",
754 base::CompareCase::INSENSITIVE_ASCII)) {
755 // Use https if we updated via https, otherwise http (useful for testing).
756 if (base::StartsWith(url_prefix_, "https://",
757 base::CompareCase::INSENSITIVE_ASCII))
758 next_url.append("https://");
759 else
760 next_url.append("http://");
761 next_url.append(url);
762 } else {
763 next_url = url;
765 if (!additional_query_.empty()) {
766 if (next_url.find("?") != std::string::npos) {
767 next_url.append("&");
768 } else {
769 next_url.append("?");
771 next_url.append(additional_query_);
773 return GURL(next_url);
776 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
777 : callback(),
778 is_download(false) {
781 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
782 FullHashCallback callback, bool is_download)
783 : callback(callback),
784 is_download(is_download) {
787 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
790 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {