Added affiliation IDs that will be used in the future to determine user affiliation...
[chromium-blink-merge.git] / chrome / browser / safe_browsing / protocol_manager.cc
blob8415327117fa65d1702ffd5044186d666468c5ca
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "chrome/browser/safe_browsing/protocol_manager.h"
7 #include "base/environment.h"
8 #include "base/logging.h"
9 #include "base/memory/scoped_vector.h"
10 #include "base/metrics/histogram.h"
11 #include "base/profiler/scoped_tracker.h"
12 #include "base/rand_util.h"
13 #include "base/stl_util.h"
14 #include "base/strings/string_util.h"
15 #include "base/strings/stringprintf.h"
16 #include "base/timer/timer.h"
17 #include "chrome/browser/safe_browsing/protocol_parser.h"
18 #include "chrome/common/chrome_version_info.h"
19 #include "chrome/common/env_vars.h"
20 #include "google_apis/google_api_keys.h"
21 #include "net/base/escape.h"
22 #include "net/base/load_flags.h"
23 #include "net/base/net_errors.h"
24 #include "net/url_request/url_fetcher.h"
25 #include "net/url_request/url_request_context_getter.h"
26 #include "net/url_request/url_request_status.h"
28 #if defined(OS_ANDROID)
29 #include "net/base/network_change_notifier.h"
30 #endif
32 using base::Time;
33 using base::TimeDelta;
35 namespace {
37 // UpdateResult indicates what happened with the primary and/or backup update
38 // requests. The ordering of the values must stay the same for UMA consistency,
39 // and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
40 enum UpdateResult {
41 UPDATE_RESULT_FAIL,
42 UPDATE_RESULT_SUCCESS,
43 UPDATE_RESULT_BACKUP_CONNECT_FAIL,
44 UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
45 UPDATE_RESULT_BACKUP_HTTP_FAIL,
46 UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
47 UPDATE_RESULT_BACKUP_NETWORK_FAIL,
48 UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
49 UPDATE_RESULT_MAX,
50 UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
53 void RecordUpdateResult(UpdateResult result) {
54 DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
55 UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
58 } // namespace
60 // Minimum time, in seconds, from start up before we must issue an update query.
61 static const int kSbTimerStartIntervalSecMin = 60;
63 // Maximum time, in seconds, from start up before we must issue an update query.
64 static const int kSbTimerStartIntervalSecMax = 300;
66 // The maximum time, in seconds, to wait for a response to an update request.
67 static const int kSbMaxUpdateWaitSec = 30;
69 // Maximum back off multiplier.
70 static const size_t kSbMaxBackOff = 8;
72 // The default SBProtocolManagerFactory.
73 class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory {
74 public:
75 SBProtocolManagerFactoryImpl() { }
76 ~SBProtocolManagerFactoryImpl() override {}
77 SafeBrowsingProtocolManager* CreateProtocolManager(
78 SafeBrowsingProtocolManagerDelegate* delegate,
79 net::URLRequestContextGetter* request_context_getter,
80 const SafeBrowsingProtocolConfig& config) override {
81 return new SafeBrowsingProtocolManager(
82 delegate, request_context_getter, config);
84 private:
85 DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl);
88 // SafeBrowsingProtocolManager implementation ----------------------------------
90 // static
91 SBProtocolManagerFactory* SafeBrowsingProtocolManager::factory_ = NULL;
93 // static
94 SafeBrowsingProtocolManager* SafeBrowsingProtocolManager::Create(
95 SafeBrowsingProtocolManagerDelegate* delegate,
96 net::URLRequestContextGetter* request_context_getter,
97 const SafeBrowsingProtocolConfig& config) {
98 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/483689 is fixed.
99 tracked_objects::ScopedTracker tracking_profile(
100 FROM_HERE_WITH_EXPLICIT_FUNCTION(
101 "483689 SafeBrowsingProtocolManager::Create"));
102 if (!factory_)
103 factory_ = new SBProtocolManagerFactoryImpl();
104 return factory_->CreateProtocolManager(
105 delegate, request_context_getter, config);
108 SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
109 SafeBrowsingProtocolManagerDelegate* delegate,
110 net::URLRequestContextGetter* request_context_getter,
111 const SafeBrowsingProtocolConfig& config)
112 : delegate_(delegate),
113 request_type_(NO_REQUEST),
114 update_error_count_(0),
115 gethash_error_count_(0),
116 update_back_off_mult_(1),
117 gethash_back_off_mult_(1),
118 next_update_interval_(base::TimeDelta::FromSeconds(
119 base::RandInt(kSbTimerStartIntervalSecMin,
120 kSbTimerStartIntervalSecMax))),
121 update_state_(FIRST_REQUEST),
122 chunk_pending_to_write_(false),
123 version_(config.version),
124 update_size_(0),
125 client_name_(config.client_name),
126 request_context_getter_(request_context_getter),
127 url_prefix_(config.url_prefix),
128 backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
129 disable_auto_update_(config.disable_auto_update),
130 #if defined(OS_ANDROID)
131 disable_connection_check_(config.disable_connection_check),
132 #endif
133 url_fetcher_id_(0),
134 app_in_foreground_(true) {
135 DCHECK(!url_prefix_.empty());
137 backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
138 config.backup_connect_error_url_prefix;
139 backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
140 config.backup_http_error_url_prefix;
141 backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
142 config.backup_network_error_url_prefix;
144 // Set the backoff multiplier fuzz to a random value between 0 and 1.
145 back_off_fuzz_ = static_cast<float>(base::RandDouble());
146 if (version_.empty())
147 version_ = SafeBrowsingProtocolManagerHelper::Version();
150 // static
151 void SafeBrowsingProtocolManager::RecordGetHashResult(
152 bool is_download, ResultType result_type) {
153 if (is_download) {
154 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type,
155 GET_HASH_RESULT_MAX);
156 } else {
157 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type,
158 GET_HASH_RESULT_MAX);
162 bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
163 return update_timer_.IsRunning();
166 SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
167 // Delete in-progress SafeBrowsing requests.
168 STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
169 hash_requests_.end());
170 hash_requests_.clear();
173 // We can only have one update or chunk request outstanding, but there may be
174 // multiple GetHash requests pending since we don't want to serialize them and
175 // slow down the user.
176 void SafeBrowsingProtocolManager::GetFullHash(
177 const std::vector<SBPrefix>& prefixes,
178 FullHashCallback callback,
179 bool is_download) {
180 DCHECK(CalledOnValidThread());
181 // If we are in GetHash backoff, we need to check if we're past the next
182 // allowed time. If we are, we can proceed with the request. If not, we are
183 // required to return empty results (i.e. treat the page as safe).
184 if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
185 RecordGetHashResult(is_download, GET_HASH_BACKOFF_ERROR);
186 std::vector<SBFullHashResult> full_hashes;
187 callback.Run(full_hashes, base::TimeDelta());
188 return;
190 GURL gethash_url = GetHashUrl();
191 net::URLFetcher* fetcher =
192 net::URLFetcher::Create(url_fetcher_id_++, gethash_url,
193 net::URLFetcher::POST, this).release();
194 hash_requests_[fetcher] = FullHashDetails(callback, is_download);
196 const std::string get_hash = safe_browsing::FormatGetHash(prefixes);
198 fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE);
199 fetcher->SetRequestContext(request_context_getter_.get());
200 fetcher->SetUploadData("text/plain", get_hash);
201 fetcher->Start();
204 void SafeBrowsingProtocolManager::GetNextUpdate() {
205 DCHECK(CalledOnValidThread());
206 if (request_.get() || request_type_ != NO_REQUEST)
207 return;
209 #if defined(OS_ANDROID)
210 if (!disable_connection_check_) {
211 net::NetworkChangeNotifier::ConnectionType type =
212 net::NetworkChangeNotifier::GetConnectionType();
213 if (type != net::NetworkChangeNotifier::CONNECTION_WIFI) {
214 ScheduleNextUpdate(false /* no back off */);
215 return;
218 #endif
220 IssueUpdateRequest();
223 // net::URLFetcherDelegate implementation ----------------------------------
225 // All SafeBrowsing request responses are handled here.
226 // TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
227 // chunk should retry the download and parse of that chunk (and
228 // what back off / how many times to try), and if that effects the
229 // update back off. For now, a failed parse of the chunk means we
230 // drop it. This isn't so bad because the next UPDATE_REQUEST we
231 // do will report all the chunks we have. If that chunk is still
232 // required, the SafeBrowsing servers will tell us to get it again.
233 void SafeBrowsingProtocolManager::OnURLFetchComplete(
234 const net::URLFetcher* source) {
235 DCHECK(CalledOnValidThread());
236 scoped_ptr<const net::URLFetcher> fetcher;
238 HashRequests::iterator it = hash_requests_.find(source);
239 if (it != hash_requests_.end()) {
240 // GetHash response.
241 fetcher.reset(it->first);
242 const FullHashDetails& details = it->second;
243 std::vector<SBFullHashResult> full_hashes;
244 base::TimeDelta cache_lifetime;
245 if (source->GetStatus().is_success() &&
246 (source->GetResponseCode() == 200 ||
247 source->GetResponseCode() == 204)) {
248 // For tracking our GetHash false positive (204) rate, compared to real
249 // (200) responses.
250 if (source->GetResponseCode() == 200)
251 RecordGetHashResult(details.is_download, GET_HASH_STATUS_200);
252 else
253 RecordGetHashResult(details.is_download, GET_HASH_STATUS_204);
255 gethash_error_count_ = 0;
256 gethash_back_off_mult_ = 1;
257 std::string data;
258 source->GetResponseAsString(&data);
259 if (!safe_browsing::ParseGetHash(
260 data.data(), data.length(), &cache_lifetime, &full_hashes)) {
261 full_hashes.clear();
262 RecordGetHashResult(details.is_download, GET_HASH_PARSE_ERROR);
263 // TODO(cbentzel): Should cache_lifetime be set to 0 here? (See
264 // http://crbug.com/360232.)
266 } else {
267 HandleGetHashError(Time::Now());
268 if (source->GetStatus().status() == net::URLRequestStatus::FAILED) {
269 RecordGetHashResult(details.is_download, GET_HASH_NETWORK_ERROR);
270 DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
271 << " failed with error: " << source->GetStatus().error();
272 } else {
273 RecordGetHashResult(details.is_download, GET_HASH_HTTP_ERROR);
274 DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
275 << " failed with error: " << source->GetResponseCode();
279 // Invoke the callback with full_hashes, even if there was a parse error or
280 // an error response code (in which case full_hashes will be empty). The
281 // caller can't be blocked indefinitely.
282 details.callback.Run(full_hashes, cache_lifetime);
284 hash_requests_.erase(it);
285 } else {
286 // Update or chunk response.
287 fetcher.reset(request_.release());
289 if (request_type_ == UPDATE_REQUEST ||
290 request_type_ == BACKUP_UPDATE_REQUEST) {
291 if (!fetcher.get()) {
292 // We've timed out waiting for an update response, so we've cancelled
293 // the update request and scheduled a new one. Ignore this response.
294 return;
297 // Cancel the update response timeout now that we have the response.
298 timeout_timer_.Stop();
301 net::URLRequestStatus status = source->GetStatus();
302 if (status.is_success() && source->GetResponseCode() == 200) {
303 // We have data from the SafeBrowsing service.
304 std::string data;
305 source->GetResponseAsString(&data);
307 // TODO(shess): Cleanup the flow of this code so that |parsed_ok| can be
308 // removed or omitted.
309 const bool parsed_ok = HandleServiceResponse(
310 source->GetURL(), data.data(), data.length());
311 if (!parsed_ok) {
312 DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
313 << " failed parse.";
314 chunk_request_urls_.clear();
315 if (request_type_ == UPDATE_REQUEST &&
316 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
317 return;
319 UpdateFinished(false);
322 switch (request_type_) {
323 case CHUNK_REQUEST:
324 if (parsed_ok) {
325 chunk_request_urls_.pop_front();
326 if (chunk_request_urls_.empty() && !chunk_pending_to_write_)
327 UpdateFinished(true);
329 break;
330 case UPDATE_REQUEST:
331 case BACKUP_UPDATE_REQUEST:
332 if (chunk_request_urls_.empty() && parsed_ok) {
333 // We are up to date since the servers gave us nothing new, so we
334 // are done with this update cycle.
335 UpdateFinished(true);
337 break;
338 case NO_REQUEST:
339 // This can happen if HandleServiceResponse fails above.
340 break;
341 default:
342 NOTREACHED();
343 break;
345 } else {
346 if (status.status() == net::URLRequestStatus::FAILED) {
347 DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
348 << " failed with error: " << source->GetStatus().error();
349 } else {
350 DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
351 << " failed with error: " << source->GetResponseCode();
353 if (request_type_ == CHUNK_REQUEST) {
354 // The SafeBrowsing service error, or very bad response code: back off.
355 chunk_request_urls_.clear();
356 } else if (request_type_ == UPDATE_REQUEST) {
357 BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
358 if (status.is_success()) {
359 backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
360 } else {
361 switch (status.error()) {
362 case net::ERR_INTERNET_DISCONNECTED:
363 case net::ERR_NETWORK_CHANGED:
364 backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
365 break;
366 default:
367 backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
368 break;
371 if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
372 IssueBackupUpdateRequest(backup_update_reason)) {
373 return;
376 UpdateFinished(false);
380 // Get the next chunk if available.
381 IssueChunkRequest();
384 bool SafeBrowsingProtocolManager::HandleServiceResponse(
385 const GURL& url, const char* data, size_t length) {
386 DCHECK(CalledOnValidThread());
388 switch (request_type_) {
389 case UPDATE_REQUEST:
390 case BACKUP_UPDATE_REQUEST: {
391 size_t next_update_sec = 0;
392 bool reset = false;
393 scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
394 new std::vector<SBChunkDelete>);
395 std::vector<ChunkUrl> chunk_urls;
396 if (!safe_browsing::ParseUpdate(data, length, &next_update_sec, &reset,
397 chunk_deletes.get(), &chunk_urls)) {
398 return false;
401 base::TimeDelta next_update_interval =
402 base::TimeDelta::FromSeconds(next_update_sec);
403 last_update_ = Time::Now();
405 if (update_state_ == FIRST_REQUEST)
406 update_state_ = SECOND_REQUEST;
407 else if (update_state_ == SECOND_REQUEST)
408 update_state_ = NORMAL_REQUEST;
410 // New time for the next update.
411 if (next_update_interval > base::TimeDelta()) {
412 next_update_interval_ = next_update_interval;
413 } else if (update_state_ == SECOND_REQUEST) {
414 next_update_interval_ = base::TimeDelta::FromSeconds(
415 base::RandInt(15, 45));
418 // New chunks to download.
419 if (!chunk_urls.empty()) {
420 UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
421 for (size_t i = 0; i < chunk_urls.size(); ++i)
422 chunk_request_urls_.push_back(chunk_urls[i]);
425 // Handle the case were the SafeBrowsing service tells us to dump our
426 // database.
427 if (reset) {
428 delegate_->ResetDatabase();
429 return true;
432 // Chunks to delete from our storage.
433 if (!chunk_deletes->empty())
434 delegate_->DeleteChunks(chunk_deletes.Pass());
436 break;
438 case CHUNK_REQUEST: {
439 UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
440 base::Time::Now() - chunk_request_start_);
442 const ChunkUrl chunk_url = chunk_request_urls_.front();
443 scoped_ptr<ScopedVector<SBChunkData> >
444 chunks(new ScopedVector<SBChunkData>);
445 UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
446 update_size_ += length;
447 if (!safe_browsing::ParseChunk(data, length, chunks.get()))
448 return false;
450 // Chunks to add to storage. Pass ownership of |chunks|.
451 if (!chunks->empty()) {
452 chunk_pending_to_write_ = true;
453 delegate_->AddChunks(
454 chunk_url.list_name, chunks.Pass(),
455 base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete,
456 base::Unretained(this)));
459 break;
462 default:
463 return false;
466 return true;
469 void SafeBrowsingProtocolManager::Initialize() {
470 DCHECK(CalledOnValidThread());
471 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/483689 is fixed.
472 tracked_objects::ScopedTracker tracking_profile(
473 FROM_HERE_WITH_EXPLICIT_FUNCTION(
474 "483689 SafeBrowsingProtocolManager::Initialize"));
475 // Don't want to hit the safe browsing servers on build/chrome bots.
476 scoped_ptr<base::Environment> env(base::Environment::Create());
477 if (env->HasVar(env_vars::kHeadless))
478 return;
479 ScheduleNextUpdate(false /* no back off */);
482 void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
483 DCHECK(CalledOnValidThread());
484 if (disable_auto_update_) {
485 // Unschedule any current timer.
486 update_timer_.Stop();
487 return;
489 // Reschedule with the new update.
490 base::TimeDelta next_update_interval = GetNextUpdateInterval(back_off);
491 ForceScheduleNextUpdate(next_update_interval);
494 void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
495 base::TimeDelta interval) {
496 DCHECK(CalledOnValidThread());
497 DCHECK(interval >= base::TimeDelta());
498 // Unschedule any current timer.
499 update_timer_.Stop();
500 update_timer_.Start(FROM_HERE, interval, this,
501 &SafeBrowsingProtocolManager::GetNextUpdate);
504 // According to section 5 of the SafeBrowsing protocol specification, we must
505 // back off after a certain number of errors. We only change |next_update_sec_|
506 // when we receive a response from the SafeBrowsing service.
507 base::TimeDelta SafeBrowsingProtocolManager::GetNextUpdateInterval(
508 bool back_off) {
509 DCHECK(CalledOnValidThread());
510 DCHECK(next_update_interval_ > base::TimeDelta());
511 base::TimeDelta next = next_update_interval_;
512 if (back_off) {
513 next = GetNextBackOffInterval(&update_error_count_, &update_back_off_mult_);
514 } else {
515 // Successful response means error reset.
516 update_error_count_ = 0;
517 update_back_off_mult_ = 1;
519 return next;
522 base::TimeDelta SafeBrowsingProtocolManager::GetNextBackOffInterval(
523 size_t* error_count, size_t* multiplier) const {
524 DCHECK(CalledOnValidThread());
525 DCHECK(multiplier && error_count);
526 (*error_count)++;
527 if (*error_count > 1 && *error_count < 6) {
528 base::TimeDelta next = base::TimeDelta::FromMinutes(
529 *multiplier * (1 + back_off_fuzz_) * 30);
530 *multiplier *= 2;
531 if (*multiplier > kSbMaxBackOff)
532 *multiplier = kSbMaxBackOff;
533 return next;
535 if (*error_count >= 6)
536 return base::TimeDelta::FromHours(8);
537 return base::TimeDelta::FromMinutes(1);
540 // This request requires getting a list of all the chunks for each list from the
541 // database asynchronously. The request will be issued when we're called back in
542 // OnGetChunksComplete.
543 // TODO(paulg): We should get this at start up and maintain a ChunkRange cache
544 // to avoid hitting the database with each update request. On the
545 // otherhand, this request will only occur ~20-30 minutes so there
546 // isn't that much overhead. Measure!
547 void SafeBrowsingProtocolManager::IssueUpdateRequest() {
548 DCHECK(CalledOnValidThread());
549 request_type_ = UPDATE_REQUEST;
550 delegate_->UpdateStarted();
551 delegate_->GetChunks(
552 base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete,
553 base::Unretained(this)));
556 // The backup request can run immediately since the chunks have already been
557 // retrieved from the DB.
558 bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
559 BackupUpdateReason backup_update_reason) {
560 DCHECK(CalledOnValidThread());
561 DCHECK_EQ(request_type_, UPDATE_REQUEST);
562 DCHECK(backup_update_reason >= 0 &&
563 backup_update_reason < BACKUP_UPDATE_REASON_MAX);
564 if (backup_url_prefixes_[backup_update_reason].empty())
565 return false;
566 request_type_ = BACKUP_UPDATE_REQUEST;
567 backup_update_reason_ = backup_update_reason;
569 GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
570 request_ = net::URLFetcher::Create(url_fetcher_id_++, backup_update_url,
571 net::URLFetcher::POST, this);
572 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
573 request_->SetRequestContext(request_context_getter_.get());
574 request_->SetUploadData("text/plain", update_list_data_);
575 request_->Start();
577 // Begin the update request timeout.
578 timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
579 this,
580 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
582 return true;
585 void SafeBrowsingProtocolManager::IssueChunkRequest() {
586 DCHECK(CalledOnValidThread());
587 // We are only allowed to have one request outstanding at any time. Also,
588 // don't get the next url until the previous one has been written to disk so
589 // that we don't use too much memory.
590 if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
591 return;
593 ChunkUrl next_chunk = chunk_request_urls_.front();
594 DCHECK(!next_chunk.url.empty());
595 GURL chunk_url = NextChunkUrl(next_chunk.url);
596 request_type_ = CHUNK_REQUEST;
597 request_ = net::URLFetcher::Create(url_fetcher_id_++, chunk_url,
598 net::URLFetcher::GET, this);
599 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
600 request_->SetRequestContext(request_context_getter_.get());
601 chunk_request_start_ = base::Time::Now();
602 request_->Start();
605 void SafeBrowsingProtocolManager::OnGetChunksComplete(
606 const std::vector<SBListChunkRanges>& lists, bool database_error) {
607 DCHECK(CalledOnValidThread());
608 DCHECK_EQ(request_type_, UPDATE_REQUEST);
609 DCHECK(update_list_data_.empty());
610 if (database_error) {
611 // The update was not successful, but don't back off.
612 UpdateFinished(false, false);
613 return;
616 // Format our stored chunks:
617 bool found_malware = false;
618 bool found_phishing = false;
619 for (size_t i = 0; i < lists.size(); ++i) {
620 update_list_data_.append(safe_browsing::FormatList(lists[i]));
621 if (lists[i].name == safe_browsing_util::kPhishingList)
622 found_phishing = true;
624 if (lists[i].name == safe_browsing_util::kMalwareList)
625 found_malware = true;
628 // If we have an empty database, let the server know we want data for these
629 // lists.
630 // TODO(shess): These cases never happen because the database fills in the
631 // lists in GetChunks(). Refactor the unit tests so that this code can be
632 // removed.
633 if (!found_phishing) {
634 update_list_data_.append(safe_browsing::FormatList(
635 SBListChunkRanges(safe_browsing_util::kPhishingList)));
637 if (!found_malware) {
638 update_list_data_.append(safe_browsing::FormatList(
639 SBListChunkRanges(safe_browsing_util::kMalwareList)));
642 // Large requests are (probably) a sign of database corruption.
643 // Record stats to inform decisions about whether to automate
644 // deletion of such databases. http://crbug.com/120219
645 UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_.size());
647 GURL update_url = UpdateUrl();
648 request_ = net::URLFetcher::Create(url_fetcher_id_++, update_url,
649 net::URLFetcher::POST, this);
650 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
651 request_->SetRequestContext(request_context_getter_.get());
652 request_->SetUploadData("text/plain", update_list_data_);
653 request_->Start();
655 // Begin the update request timeout.
656 timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
657 this,
658 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
661 // If we haven't heard back from the server with an update response, this method
662 // will run. Close the current update session and schedule another update.
663 void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
664 DCHECK(CalledOnValidThread());
665 DCHECK(request_type_ == UPDATE_REQUEST ||
666 request_type_ == BACKUP_UPDATE_REQUEST);
667 request_.reset();
668 if (request_type_ == UPDATE_REQUEST &&
669 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
670 return;
672 UpdateFinished(false);
675 void SafeBrowsingProtocolManager::OnAddChunksComplete() {
676 DCHECK(CalledOnValidThread());
677 chunk_pending_to_write_ = false;
679 if (chunk_request_urls_.empty()) {
680 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
681 UpdateFinished(true);
682 } else {
683 IssueChunkRequest();
687 void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
688 DCHECK(CalledOnValidThread());
689 base::TimeDelta next = GetNextBackOffInterval(
690 &gethash_error_count_, &gethash_back_off_mult_);
691 next_gethash_time_ = now + next;
694 void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
695 UpdateFinished(success, !success);
698 void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
699 DCHECK(CalledOnValidThread());
700 #if defined(OS_ANDROID)
701 if (app_in_foreground_)
702 UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeForeground", update_size_);
703 else
704 UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeBackground", update_size_);
705 #endif
706 UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
707 update_size_ = 0;
708 bool update_success = success || request_type_ == CHUNK_REQUEST;
709 if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
710 RecordUpdateResult(
711 update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
712 } else {
713 UpdateResult update_result = static_cast<UpdateResult>(
714 UPDATE_RESULT_BACKUP_START +
715 (static_cast<int>(backup_update_reason_) * 2) +
716 update_success);
717 RecordUpdateResult(update_result);
719 backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
720 request_type_ = NO_REQUEST;
721 update_list_data_.clear();
722 delegate_->UpdateFinished(success);
723 ScheduleNextUpdate(back_off);
726 GURL SafeBrowsingProtocolManager::UpdateUrl() const {
727 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
728 url_prefix_, "downloads", client_name_, version_, additional_query_);
729 return GURL(url);
732 GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
733 BackupUpdateReason backup_update_reason) const {
734 DCHECK(backup_update_reason >= 0 &&
735 backup_update_reason < BACKUP_UPDATE_REASON_MAX);
736 DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
737 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
738 backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
739 version_, additional_query_);
740 return GURL(url);
743 GURL SafeBrowsingProtocolManager::GetHashUrl() const {
744 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
745 url_prefix_, "gethash", client_name_, version_, additional_query_);
746 return GURL(url);
749 GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
750 DCHECK(CalledOnValidThread());
751 std::string next_url;
752 if (!base::StartsWith(url, "http://",
753 base::CompareCase::INSENSITIVE_ASCII) &&
754 !base::StartsWith(url, "https://",
755 base::CompareCase::INSENSITIVE_ASCII)) {
756 // Use https if we updated via https, otherwise http (useful for testing).
757 if (base::StartsWith(url_prefix_, "https://",
758 base::CompareCase::INSENSITIVE_ASCII))
759 next_url.append("https://");
760 else
761 next_url.append("http://");
762 next_url.append(url);
763 } else {
764 next_url = url;
766 if (!additional_query_.empty()) {
767 if (next_url.find("?") != std::string::npos) {
768 next_url.append("&");
769 } else {
770 next_url.append("?");
772 next_url.append(additional_query_);
774 return GURL(next_url);
777 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
778 : callback(),
779 is_download(false) {
782 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
783 FullHashCallback callback, bool is_download)
784 : callback(callback),
785 is_download(is_download) {
788 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
791 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {