Only grant permissions to new extensions from sync if they have the expected version
[chromium-blink-merge.git] / chrome / browser / safe_browsing / protocol_manager.cc
blob1e196bcf1b21548181b95577fcfb9a1be797a268
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "chrome/browser/safe_browsing/protocol_manager.h"
7 #include "base/environment.h"
8 #include "base/logging.h"
9 #include "base/memory/scoped_vector.h"
10 #include "base/metrics/histogram_macros.h"
11 #include "base/metrics/sparse_histogram.h"
12 #include "base/profiler/scoped_tracker.h"
13 #include "base/rand_util.h"
14 #include "base/stl_util.h"
15 #include "base/strings/string_util.h"
16 #include "base/strings/stringprintf.h"
17 #include "base/timer/timer.h"
18 #include "chrome/browser/safe_browsing/protocol_parser.h"
19 #include "chrome/common/env_vars.h"
20 #include "google_apis/google_api_keys.h"
21 #include "net/base/escape.h"
22 #include "net/base/load_flags.h"
23 #include "net/base/net_errors.h"
24 #include "net/http/http_response_headers.h"
25 #include "net/http/http_status_code.h"
26 #include "net/url_request/url_fetcher.h"
27 #include "net/url_request/url_request_context_getter.h"
28 #include "net/url_request/url_request_status.h"
30 #if defined(OS_ANDROID)
31 #include "net/base/network_change_notifier.h"
32 #endif
34 using base::Time;
35 using base::TimeDelta;
37 namespace {
39 // UpdateResult indicates what happened with the primary and/or backup update
40 // requests. The ordering of the values must stay the same for UMA consistency,
41 // and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
42 enum UpdateResult {
43 UPDATE_RESULT_FAIL,
44 UPDATE_RESULT_SUCCESS,
45 UPDATE_RESULT_BACKUP_CONNECT_FAIL,
46 UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
47 UPDATE_RESULT_BACKUP_HTTP_FAIL,
48 UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
49 UPDATE_RESULT_BACKUP_NETWORK_FAIL,
50 UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
51 UPDATE_RESULT_MAX,
52 UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
55 void RecordUpdateResult(UpdateResult result) {
56 DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
57 UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
60 } // namespace
// Minimum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMin = 60;

// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMax = 300;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 30;

// Maximum back off multiplier.
static const size_t kSbMaxBackOff = 8;

// Histogram name recording the HTTP response code (or net error code on
// failure) of GetHash requests.
const char kUmaHashResponseMetricName[] = "SB2.GetHashResponseOrErrorCode";
76 // The default SBProtocolManagerFactory.
77 class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory {
78 public:
79 SBProtocolManagerFactoryImpl() { }
80 ~SBProtocolManagerFactoryImpl() override {}
81 SafeBrowsingProtocolManager* CreateProtocolManager(
82 SafeBrowsingProtocolManagerDelegate* delegate,
83 net::URLRequestContextGetter* request_context_getter,
84 const SafeBrowsingProtocolConfig& config) override {
85 return new SafeBrowsingProtocolManager(
86 delegate, request_context_getter, config);
88 private:
89 DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl);
92 // SafeBrowsingProtocolManager implementation ----------------------------------
94 // static
95 SBProtocolManagerFactory* SafeBrowsingProtocolManager::factory_ = NULL;
97 // static
98 SafeBrowsingProtocolManager* SafeBrowsingProtocolManager::Create(
99 SafeBrowsingProtocolManagerDelegate* delegate,
100 net::URLRequestContextGetter* request_context_getter,
101 const SafeBrowsingProtocolConfig& config) {
102 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/483689 is fixed.
103 tracked_objects::ScopedTracker tracking_profile(
104 FROM_HERE_WITH_EXPLICIT_FUNCTION(
105 "483689 SafeBrowsingProtocolManager::Create"));
106 if (!factory_)
107 factory_ = new SBProtocolManagerFactoryImpl();
108 return factory_->CreateProtocolManager(
109 delegate, request_context_getter, config);
112 SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
113 SafeBrowsingProtocolManagerDelegate* delegate,
114 net::URLRequestContextGetter* request_context_getter,
115 const SafeBrowsingProtocolConfig& config)
116 : delegate_(delegate),
117 request_type_(NO_REQUEST),
118 update_error_count_(0),
119 gethash_error_count_(0),
120 update_back_off_mult_(1),
121 gethash_back_off_mult_(1),
122 next_update_interval_(base::TimeDelta::FromSeconds(
123 base::RandInt(kSbTimerStartIntervalSecMin,
124 kSbTimerStartIntervalSecMax))),
125 update_state_(FIRST_REQUEST),
126 chunk_pending_to_write_(false),
127 version_(config.version),
128 update_size_(0),
129 client_name_(config.client_name),
130 request_context_getter_(request_context_getter),
131 url_prefix_(config.url_prefix),
132 backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
133 disable_auto_update_(config.disable_auto_update),
134 #if defined(OS_ANDROID)
135 disable_connection_check_(config.disable_connection_check),
136 #endif
137 url_fetcher_id_(0),
138 app_in_foreground_(true) {
139 DCHECK(!url_prefix_.empty());
141 backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
142 config.backup_connect_error_url_prefix;
143 backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
144 config.backup_http_error_url_prefix;
145 backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
146 config.backup_network_error_url_prefix;
148 // Set the backoff multiplier fuzz to a random value between 0 and 1.
149 back_off_fuzz_ = static_cast<float>(base::RandDouble());
150 if (version_.empty())
151 version_ = SafeBrowsingProtocolManagerHelper::Version();
154 // static
155 void SafeBrowsingProtocolManager::RecordGetHashResult(
156 bool is_download, ResultType result_type) {
157 if (is_download) {
158 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type,
159 GET_HASH_RESULT_MAX);
160 } else {
161 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type,
162 GET_HASH_RESULT_MAX);
166 void SafeBrowsingProtocolManager::RecordHttpResponseOrErrorCode(
167 const char* metric_name, const net::URLRequestStatus& status,
168 int response_code) {
169 UMA_HISTOGRAM_SPARSE_SLOWLY(
170 metric_name, status.is_success() ? response_code : status.error());
173 bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
174 return update_timer_.IsRunning();
177 SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
178 // Delete in-progress SafeBrowsing requests.
179 STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
180 hash_requests_.end());
181 hash_requests_.clear();
184 // We can only have one update or chunk request outstanding, but there may be
185 // multiple GetHash requests pending since we don't want to serialize them and
186 // slow down the user.
187 void SafeBrowsingProtocolManager::GetFullHash(
188 const std::vector<SBPrefix>& prefixes,
189 FullHashCallback callback,
190 bool is_download,
191 bool is_extended_reporting) {
192 DCHECK(CalledOnValidThread());
193 // If we are in GetHash backoff, we need to check if we're past the next
194 // allowed time. If we are, we can proceed with the request. If not, we are
195 // required to return empty results (i.e. treat the page as safe).
196 if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
197 RecordGetHashResult(is_download, GET_HASH_BACKOFF_ERROR);
198 std::vector<SBFullHashResult> full_hashes;
199 callback.Run(full_hashes, base::TimeDelta());
200 return;
202 GURL gethash_url = GetHashUrl(is_extended_reporting);
203 net::URLFetcher* fetcher =
204 net::URLFetcher::Create(url_fetcher_id_++, gethash_url,
205 net::URLFetcher::POST, this).release();
206 hash_requests_[fetcher] = FullHashDetails(callback, is_download);
208 const std::string get_hash = safe_browsing::FormatGetHash(prefixes);
210 fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE);
211 fetcher->SetRequestContext(request_context_getter_.get());
212 fetcher->SetUploadData("text/plain", get_hash);
213 fetcher->Start();
216 void SafeBrowsingProtocolManager::GetNextUpdate() {
217 DCHECK(CalledOnValidThread());
218 if (request_.get() || request_type_ != NO_REQUEST)
219 return;
221 #if defined(OS_ANDROID)
222 if (!disable_connection_check_) {
223 net::NetworkChangeNotifier::ConnectionType type =
224 net::NetworkChangeNotifier::GetConnectionType();
225 if (type != net::NetworkChangeNotifier::CONNECTION_WIFI) {
226 ScheduleNextUpdate(false /* no back off */);
227 return;
230 #endif
232 IssueUpdateRequest();
235 // net::URLFetcherDelegate implementation ----------------------------------
237 // All SafeBrowsing request responses are handled here.
238 // TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
239 // chunk should retry the download and parse of that chunk (and
240 // what back off / how many times to try), and if that effects the
241 // update back off. For now, a failed parse of the chunk means we
242 // drop it. This isn't so bad because the next UPDATE_REQUEST we
243 // do will report all the chunks we have. If that chunk is still
244 // required, the SafeBrowsing servers will tell us to get it again.
245 void SafeBrowsingProtocolManager::OnURLFetchComplete(
246 const net::URLFetcher* source) {
247 DCHECK(CalledOnValidThread());
248 scoped_ptr<const net::URLFetcher> fetcher;
250 HashRequests::iterator it = hash_requests_.find(source);
251 int response_code = source->GetResponseCode();
252 net::URLRequestStatus status = source->GetStatus();
253 RecordHttpResponseOrErrorCode(
254 kUmaHashResponseMetricName, status, response_code);
255 if (it != hash_requests_.end()) {
256 // GetHash response.
257 fetcher.reset(it->first);
258 const FullHashDetails& details = it->second;
259 std::vector<SBFullHashResult> full_hashes;
260 base::TimeDelta cache_lifetime;
261 if (status.is_success() &&
262 (response_code == net::HTTP_OK ||
263 response_code == net::HTTP_NO_CONTENT)) {
264 // For tracking our GetHash false positive (net::HTTP_NO_CONTENT) rate,
265 // compared to real (net::HTTP_OK) responses.
266 if (response_code == net::HTTP_OK)
267 RecordGetHashResult(details.is_download, GET_HASH_STATUS_200);
268 else
269 RecordGetHashResult(details.is_download, GET_HASH_STATUS_204);
271 gethash_error_count_ = 0;
272 gethash_back_off_mult_ = 1;
273 std::string data;
274 source->GetResponseAsString(&data);
275 if (!safe_browsing::ParseGetHash(
276 data.data(), data.length(), &cache_lifetime, &full_hashes)) {
277 full_hashes.clear();
278 RecordGetHashResult(details.is_download, GET_HASH_PARSE_ERROR);
279 // TODO(cbentzel): Should cache_lifetime be set to 0 here? (See
280 // http://crbug.com/360232.)
282 } else {
283 HandleGetHashError(Time::Now());
284 if (status.status() == net::URLRequestStatus::FAILED) {
285 RecordGetHashResult(details.is_download, GET_HASH_NETWORK_ERROR);
286 DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
287 << " failed with error: " << status.error();
288 } else {
289 RecordGetHashResult(details.is_download, GET_HASH_HTTP_ERROR);
290 DVLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
291 << " failed with error: " << response_code;
295 // Invoke the callback with full_hashes, even if there was a parse error or
296 // an error response code (in which case full_hashes will be empty). The
297 // caller can't be blocked indefinitely.
298 details.callback.Run(full_hashes, cache_lifetime);
300 hash_requests_.erase(it);
301 } else {
302 // Update or chunk response.
303 fetcher.reset(request_.release());
305 if (request_type_ == UPDATE_REQUEST ||
306 request_type_ == BACKUP_UPDATE_REQUEST) {
307 if (!fetcher.get()) {
308 // We've timed out waiting for an update response, so we've cancelled
309 // the update request and scheduled a new one. Ignore this response.
310 return;
313 // Cancel the update response timeout now that we have the response.
314 timeout_timer_.Stop();
317 if (status.is_success() && response_code == net::HTTP_OK) {
318 // We have data from the SafeBrowsing service.
319 std::string data;
320 source->GetResponseAsString(&data);
322 // TODO(shess): Cleanup the flow of this code so that |parsed_ok| can be
323 // removed or omitted.
324 const bool parsed_ok = HandleServiceResponse(
325 source->GetURL(), data.data(), data.length());
326 if (!parsed_ok) {
327 DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
328 << " failed parse.";
329 chunk_request_urls_.clear();
330 if (request_type_ == UPDATE_REQUEST &&
331 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
332 return;
334 UpdateFinished(false);
337 switch (request_type_) {
338 case CHUNK_REQUEST:
339 if (parsed_ok) {
340 chunk_request_urls_.pop_front();
341 if (chunk_request_urls_.empty() && !chunk_pending_to_write_)
342 UpdateFinished(true);
344 break;
345 case UPDATE_REQUEST:
346 case BACKUP_UPDATE_REQUEST:
347 if (chunk_request_urls_.empty() && parsed_ok) {
348 // We are up to date since the servers gave us nothing new, so we
349 // are done with this update cycle.
350 UpdateFinished(true);
352 break;
353 case NO_REQUEST:
354 // This can happen if HandleServiceResponse fails above.
355 break;
356 default:
357 NOTREACHED();
358 break;
360 } else {
361 if (status.status() == net::URLRequestStatus::FAILED) {
362 DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
363 << " failed with error: " << status.error();
364 } else {
365 DVLOG(1) << "SafeBrowsing request for: " << source->GetURL()
366 << " failed with error: " << response_code;
368 if (request_type_ == CHUNK_REQUEST) {
369 // The SafeBrowsing service error, or very bad response code: back off.
370 chunk_request_urls_.clear();
371 } else if (request_type_ == UPDATE_REQUEST) {
372 BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
373 if (status.is_success()) {
374 backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
375 } else {
376 switch (status.error()) {
377 case net::ERR_INTERNET_DISCONNECTED:
378 case net::ERR_NETWORK_CHANGED:
379 backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
380 break;
381 default:
382 backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
383 break;
386 if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
387 IssueBackupUpdateRequest(backup_update_reason)) {
388 return;
391 UpdateFinished(false);
395 // Get the next chunk if available.
396 IssueChunkRequest();
399 bool SafeBrowsingProtocolManager::HandleServiceResponse(
400 const GURL& url, const char* data, size_t length) {
401 DCHECK(CalledOnValidThread());
403 switch (request_type_) {
404 case UPDATE_REQUEST:
405 case BACKUP_UPDATE_REQUEST: {
406 size_t next_update_sec = 0;
407 bool reset = false;
408 scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
409 new std::vector<SBChunkDelete>);
410 std::vector<ChunkUrl> chunk_urls;
411 if (!safe_browsing::ParseUpdate(data, length, &next_update_sec, &reset,
412 chunk_deletes.get(), &chunk_urls)) {
413 return false;
416 base::TimeDelta next_update_interval =
417 base::TimeDelta::FromSeconds(next_update_sec);
418 last_update_ = Time::Now();
420 if (update_state_ == FIRST_REQUEST)
421 update_state_ = SECOND_REQUEST;
422 else if (update_state_ == SECOND_REQUEST)
423 update_state_ = NORMAL_REQUEST;
425 // New time for the next update.
426 if (next_update_interval > base::TimeDelta()) {
427 next_update_interval_ = next_update_interval;
428 } else if (update_state_ == SECOND_REQUEST) {
429 next_update_interval_ = base::TimeDelta::FromSeconds(
430 base::RandInt(15, 45));
433 // New chunks to download.
434 if (!chunk_urls.empty()) {
435 UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
436 for (size_t i = 0; i < chunk_urls.size(); ++i)
437 chunk_request_urls_.push_back(chunk_urls[i]);
440 // Handle the case were the SafeBrowsing service tells us to dump our
441 // database.
442 if (reset) {
443 delegate_->ResetDatabase();
444 return true;
447 // Chunks to delete from our storage.
448 if (!chunk_deletes->empty())
449 delegate_->DeleteChunks(chunk_deletes.Pass());
451 break;
453 case CHUNK_REQUEST: {
454 UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
455 base::Time::Now() - chunk_request_start_);
457 const ChunkUrl chunk_url = chunk_request_urls_.front();
458 scoped_ptr<ScopedVector<SBChunkData> >
459 chunks(new ScopedVector<SBChunkData>);
460 UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
461 update_size_ += length;
462 if (!safe_browsing::ParseChunk(data, length, chunks.get()))
463 return false;
465 // Chunks to add to storage. Pass ownership of |chunks|.
466 if (!chunks->empty()) {
467 chunk_pending_to_write_ = true;
468 delegate_->AddChunks(
469 chunk_url.list_name, chunks.Pass(),
470 base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete,
471 base::Unretained(this)));
474 break;
477 default:
478 return false;
481 return true;
484 void SafeBrowsingProtocolManager::Initialize() {
485 DCHECK(CalledOnValidThread());
486 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/483689 is fixed.
487 tracked_objects::ScopedTracker tracking_profile(
488 FROM_HERE_WITH_EXPLICIT_FUNCTION(
489 "483689 SafeBrowsingProtocolManager::Initialize"));
490 // Don't want to hit the safe browsing servers on build/chrome bots.
491 scoped_ptr<base::Environment> env(base::Environment::Create());
492 if (env->HasVar(env_vars::kHeadless))
493 return;
494 ScheduleNextUpdate(false /* no back off */);
497 void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
498 DCHECK(CalledOnValidThread());
499 if (disable_auto_update_) {
500 // Unschedule any current timer.
501 update_timer_.Stop();
502 return;
504 // Reschedule with the new update.
505 base::TimeDelta next_update_interval = GetNextUpdateInterval(back_off);
506 ForceScheduleNextUpdate(next_update_interval);
509 void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
510 base::TimeDelta interval) {
511 DCHECK(CalledOnValidThread());
512 DCHECK(interval >= base::TimeDelta());
513 // Unschedule any current timer.
514 update_timer_.Stop();
515 update_timer_.Start(FROM_HERE, interval, this,
516 &SafeBrowsingProtocolManager::GetNextUpdate);
519 // According to section 5 of the SafeBrowsing protocol specification, we must
520 // back off after a certain number of errors. We only change |next_update_sec_|
521 // when we receive a response from the SafeBrowsing service.
522 base::TimeDelta SafeBrowsingProtocolManager::GetNextUpdateInterval(
523 bool back_off) {
524 DCHECK(CalledOnValidThread());
525 DCHECK(next_update_interval_ > base::TimeDelta());
526 base::TimeDelta next = next_update_interval_;
527 if (back_off) {
528 next = GetNextBackOffInterval(&update_error_count_, &update_back_off_mult_);
529 } else {
530 // Successful response means error reset.
531 update_error_count_ = 0;
532 update_back_off_mult_ = 1;
534 return next;
537 base::TimeDelta SafeBrowsingProtocolManager::GetNextBackOffInterval(
538 size_t* error_count, size_t* multiplier) const {
539 DCHECK(CalledOnValidThread());
540 DCHECK(multiplier && error_count);
541 (*error_count)++;
542 if (*error_count > 1 && *error_count < 6) {
543 base::TimeDelta next = base::TimeDelta::FromMinutes(
544 *multiplier * (1 + back_off_fuzz_) * 30);
545 *multiplier *= 2;
546 if (*multiplier > kSbMaxBackOff)
547 *multiplier = kSbMaxBackOff;
548 return next;
550 if (*error_count >= 6)
551 return base::TimeDelta::FromHours(8);
552 return base::TimeDelta::FromMinutes(1);
555 // This request requires getting a list of all the chunks for each list from the
556 // database asynchronously. The request will be issued when we're called back in
557 // OnGetChunksComplete.
558 // TODO(paulg): We should get this at start up and maintain a ChunkRange cache
559 // to avoid hitting the database with each update request. On the
560 // otherhand, this request will only occur ~20-30 minutes so there
561 // isn't that much overhead. Measure!
562 void SafeBrowsingProtocolManager::IssueUpdateRequest() {
563 DCHECK(CalledOnValidThread());
564 request_type_ = UPDATE_REQUEST;
565 delegate_->UpdateStarted();
566 delegate_->GetChunks(
567 base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete,
568 base::Unretained(this)));
571 // The backup request can run immediately since the chunks have already been
572 // retrieved from the DB.
573 bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
574 BackupUpdateReason backup_update_reason) {
575 DCHECK(CalledOnValidThread());
576 DCHECK_EQ(request_type_, UPDATE_REQUEST);
577 DCHECK(backup_update_reason >= 0 &&
578 backup_update_reason < BACKUP_UPDATE_REASON_MAX);
579 if (backup_url_prefixes_[backup_update_reason].empty())
580 return false;
581 request_type_ = BACKUP_UPDATE_REQUEST;
582 backup_update_reason_ = backup_update_reason;
584 GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
585 request_ = net::URLFetcher::Create(url_fetcher_id_++, backup_update_url,
586 net::URLFetcher::POST, this);
587 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
588 request_->SetRequestContext(request_context_getter_.get());
589 request_->SetUploadData("text/plain", update_list_data_);
590 request_->Start();
592 // Begin the update request timeout.
593 timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
594 this,
595 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
597 return true;
600 void SafeBrowsingProtocolManager::IssueChunkRequest() {
601 DCHECK(CalledOnValidThread());
602 // We are only allowed to have one request outstanding at any time. Also,
603 // don't get the next url until the previous one has been written to disk so
604 // that we don't use too much memory.
605 if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
606 return;
608 ChunkUrl next_chunk = chunk_request_urls_.front();
609 DCHECK(!next_chunk.url.empty());
610 GURL chunk_url = NextChunkUrl(next_chunk.url);
611 request_type_ = CHUNK_REQUEST;
612 request_ = net::URLFetcher::Create(url_fetcher_id_++, chunk_url,
613 net::URLFetcher::GET, this);
614 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
615 request_->SetRequestContext(request_context_getter_.get());
616 chunk_request_start_ = base::Time::Now();
617 request_->Start();
620 void SafeBrowsingProtocolManager::OnGetChunksComplete(
621 const std::vector<SBListChunkRanges>& lists,
622 bool database_error,
623 bool is_extended_reporting) {
624 DCHECK(CalledOnValidThread());
625 DCHECK_EQ(request_type_, UPDATE_REQUEST);
626 DCHECK(update_list_data_.empty());
627 if (database_error) {
628 // The update was not successful, but don't back off.
629 UpdateFinished(false, false);
630 return;
633 // Format our stored chunks:
634 bool found_malware = false;
635 bool found_phishing = false;
636 for (size_t i = 0; i < lists.size(); ++i) {
637 update_list_data_.append(safe_browsing::FormatList(lists[i]));
638 if (lists[i].name == safe_browsing_util::kPhishingList)
639 found_phishing = true;
641 if (lists[i].name == safe_browsing_util::kMalwareList)
642 found_malware = true;
645 // If we have an empty database, let the server know we want data for these
646 // lists.
647 // TODO(shess): These cases never happen because the database fills in the
648 // lists in GetChunks(). Refactor the unit tests so that this code can be
649 // removed.
650 if (!found_phishing) {
651 update_list_data_.append(safe_browsing::FormatList(
652 SBListChunkRanges(safe_browsing_util::kPhishingList)));
654 if (!found_malware) {
655 update_list_data_.append(safe_browsing::FormatList(
656 SBListChunkRanges(safe_browsing_util::kMalwareList)));
659 // Large requests are (probably) a sign of database corruption.
660 // Record stats to inform decisions about whether to automate
661 // deletion of such databases. http://crbug.com/120219
662 UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_.size());
664 GURL update_url = UpdateUrl(is_extended_reporting);
665 request_ = net::URLFetcher::Create(url_fetcher_id_++, update_url,
666 net::URLFetcher::POST, this);
667 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
668 request_->SetRequestContext(request_context_getter_.get());
669 request_->SetUploadData("text/plain", update_list_data_);
670 request_->Start();
672 // Begin the update request timeout.
673 timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
674 this,
675 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
678 // If we haven't heard back from the server with an update response, this method
679 // will run. Close the current update session and schedule another update.
680 void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
681 DCHECK(CalledOnValidThread());
682 DCHECK(request_type_ == UPDATE_REQUEST ||
683 request_type_ == BACKUP_UPDATE_REQUEST);
684 request_.reset();
685 if (request_type_ == UPDATE_REQUEST &&
686 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
687 return;
689 UpdateFinished(false);
692 void SafeBrowsingProtocolManager::OnAddChunksComplete() {
693 DCHECK(CalledOnValidThread());
694 chunk_pending_to_write_ = false;
696 if (chunk_request_urls_.empty()) {
697 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
698 UpdateFinished(true);
699 } else {
700 IssueChunkRequest();
704 void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
705 DCHECK(CalledOnValidThread());
706 base::TimeDelta next = GetNextBackOffInterval(
707 &gethash_error_count_, &gethash_back_off_mult_);
708 next_gethash_time_ = now + next;
711 void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
712 UpdateFinished(success, !success);
715 void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
716 DCHECK(CalledOnValidThread());
717 #if defined(OS_ANDROID)
718 if (app_in_foreground_)
719 UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeForeground", update_size_);
720 else
721 UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeBackground", update_size_);
722 #endif
723 UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
724 update_size_ = 0;
725 bool update_success = success || request_type_ == CHUNK_REQUEST;
726 if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
727 RecordUpdateResult(
728 update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
729 } else {
730 UpdateResult update_result = static_cast<UpdateResult>(
731 UPDATE_RESULT_BACKUP_START +
732 (static_cast<int>(backup_update_reason_) * 2) +
733 update_success);
734 RecordUpdateResult(update_result);
736 backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
737 request_type_ = NO_REQUEST;
738 update_list_data_.clear();
739 delegate_->UpdateFinished(success);
740 ScheduleNextUpdate(back_off);
743 GURL SafeBrowsingProtocolManager::UpdateUrl(bool is_extended_reporting) const {
744 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
745 url_prefix_, "downloads", client_name_, version_, additional_query_,
746 is_extended_reporting);
747 return GURL(url);
750 GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
751 BackupUpdateReason backup_update_reason) const {
752 DCHECK(backup_update_reason >= 0 &&
753 backup_update_reason < BACKUP_UPDATE_REASON_MAX);
754 DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
755 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
756 backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
757 version_, additional_query_);
758 return GURL(url);
761 GURL SafeBrowsingProtocolManager::GetHashUrl(bool is_extended_reporting) const {
762 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
763 url_prefix_, "gethash", client_name_, version_, additional_query_,
764 is_extended_reporting);
765 return GURL(url);
768 GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
769 DCHECK(CalledOnValidThread());
770 std::string next_url;
771 if (!base::StartsWith(url, "http://",
772 base::CompareCase::INSENSITIVE_ASCII) &&
773 !base::StartsWith(url, "https://",
774 base::CompareCase::INSENSITIVE_ASCII)) {
775 // Use https if we updated via https, otherwise http (useful for testing).
776 if (base::StartsWith(url_prefix_, "https://",
777 base::CompareCase::INSENSITIVE_ASCII))
778 next_url.append("https://");
779 else
780 next_url.append("http://");
781 next_url.append(url);
782 } else {
783 next_url = url;
785 if (!additional_query_.empty()) {
786 if (next_url.find("?") != std::string::npos) {
787 next_url.append("&");
788 } else {
789 next_url.append("?");
791 next_url.append(additional_query_);
793 return GURL(next_url);
796 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
797 : callback(),
798 is_download(false) {
801 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
802 FullHashCallback callback, bool is_download)
803 : callback(callback),
804 is_download(is_download) {
807 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
810 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {