// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/safe_browsing/protocol_manager.h"

#ifndef NDEBUG
#include "base/base64.h"
#endif
#include "base/environment.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/timer/timer.h"
#include "chrome/browser/safe_browsing/protocol_parser.h"
#include "chrome/common/chrome_version_info.h"
#include "chrome/common/env_vars.h"
#include "google_apis/google_api_keys.h"
#include "net/base/escape.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_status.h"

using base::Time;
using base::TimeDelta;

namespace {

// UpdateResult indicates what happened with the primary and/or backup update
// requests. The ordering of the values must stay the same for UMA consistency,
// and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
enum UpdateResult {
  UPDATE_RESULT_FAIL,
  UPDATE_RESULT_SUCCESS,
  UPDATE_RESULT_BACKUP_CONNECT_FAIL,
  UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
  UPDATE_RESULT_BACKUP_HTTP_FAIL,
  UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
  UPDATE_RESULT_BACKUP_NETWORK_FAIL,
  UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
  UPDATE_RESULT_MAX,
  UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
};

void RecordUpdateResult(UpdateResult result) {
  DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
  UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
}

}  // namespace

// Minimum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMin = 60;

// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMax = 300;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 30;

// Maximum back off multiplier.
static const int kSbMaxBackOff = 8;

// The default SBProtocolManagerFactory.
class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory {
 public:
  SBProtocolManagerFactoryImpl() { }
  virtual ~SBProtocolManagerFactoryImpl() { }
  virtual SafeBrowsingProtocolManager* CreateProtocolManager(
      SafeBrowsingProtocolManagerDelegate* delegate,
      net::URLRequestContextGetter* request_context_getter,
      const SafeBrowsingProtocolConfig& config) OVERRIDE {
    return new SafeBrowsingProtocolManager(
        delegate, request_context_getter, config);
  }
 private:
  DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl);
};

// SafeBrowsingProtocolManager implementation ----------------------------------

// static
SBProtocolManagerFactory* SafeBrowsingProtocolManager::factory_ = NULL;

// static
SafeBrowsingProtocolManager* SafeBrowsingProtocolManager::Create(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config) {
  if (!factory_)
    factory_ = new SBProtocolManagerFactoryImpl();
  return factory_->CreateProtocolManager(
      delegate, request_context_getter, config);
}

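// A minimal usage sketch (hypothetical values; in Chrome the manager is
// created and owned by SafeBrowsingService with its real config):
//   SafeBrowsingProtocolConfig config;
//   config.client_name = "unittest";
//   config.url_prefix = "https://prefix.example.com/safebrowsing";
//   SafeBrowsingProtocolManager* manager = SafeBrowsingProtocolManager::Create(
//       delegate, request_context_getter, config);
//   manager->Initialize();
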
SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config)
    : delegate_(delegate),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      next_update_interval_(base::TimeDelta::FromSeconds(
          base::RandInt(kSbTimerStartIntervalSecMin,
                        kSbTimerStartIntervalSecMax))),
      update_state_(FIRST_REQUEST),
      chunk_pending_to_write_(false),
      version_(config.version),
      update_size_(0),
      client_name_(config.client_name),
      request_context_getter_(request_context_getter),
      url_prefix_(config.url_prefix),
      backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
      disable_auto_update_(config.disable_auto_update),
      url_fetcher_id_(0) {
  DCHECK(!url_prefix_.empty());

  backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
      config.backup_connect_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
      config.backup_http_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
      config.backup_network_error_url_prefix;

  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());
  if (version_.empty())
    version_ = SafeBrowsingProtocolManagerHelper::Version();
}

// static
void SafeBrowsingProtocolManager::RecordGetHashResult(
    bool is_download, ResultType result_type) {
  if (is_download) {
    UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type,
                              GET_HASH_RESULT_MAX);
  } else {
    UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type,
                              GET_HASH_RESULT_MAX);
  }
}

bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
  return update_timer_.IsRunning();
}

SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
  // Delete in-progress SafeBrowsing requests.
  STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
                                      hash_requests_.end());
  hash_requests_.clear();
}

// We can only have one update or chunk request outstanding, but there may be
// multiple GetHash requests pending since we don't want to serialize them and
// slow down the user.
void SafeBrowsingProtocolManager::GetFullHash(
    const std::vector<SBPrefix>& prefixes,
    FullHashCallback callback,
    bool is_download) {
  DCHECK(CalledOnValidThread());
  // If we are in GetHash backoff, we need to check if we're past the next
  // allowed time. If we are, we can proceed with the request. If not, we are
  // required to return empty results (i.e. treat the page as safe).
  if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
    std::vector<SBFullHashResult> full_hashes;
    callback.Run(full_hashes, false);
    return;
  }
  GURL gethash_url = GetHashUrl();
  net::URLFetcher* fetcher = net::URLFetcher::Create(
      url_fetcher_id_++, gethash_url, net::URLFetcher::POST, this);
  hash_requests_[fetcher] = FullHashDetails(callback, is_download);

  std::string get_hash;
  SafeBrowsingProtocolParser parser;
  parser.FormatGetHash(prefixes, &get_hash);

  fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  fetcher->SetRequestContext(request_context_getter_.get());
  fetcher->SetUploadData("text/plain", get_hash);
  fetcher->Start();
}

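// A GetHash request body, as produced by FormatGetHash above, looks roughly
// like this (illustrative sketch; protocol_parser.cc owns the authoritative
// wire format):
//   4:8
//   <eight raw bytes: two 4-byte prefixes, back to back>
// i.e. "<prefix size>:<total payload size>" on the first line, followed by
// the binary prefixes.
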
void SafeBrowsingProtocolManager::GetNextUpdate() {
  DCHECK(CalledOnValidThread());
  if (!request_.get() && request_type_ == NO_REQUEST)
    IssueUpdateRequest();
}

// net::URLFetcherDelegate implementation ----------------------------------

// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that affects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const net::URLFetcher* source) {
  DCHECK(CalledOnValidThread());
  scoped_ptr<const net::URLFetcher> fetcher;
  bool parsed_ok = true;

  HashRequests::iterator it = hash_requests_.find(source);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    const FullHashDetails& details = it->second;
    std::vector<SBFullHashResult> full_hashes;
    bool can_cache = false;
    if (source->GetStatus().is_success() &&
        (source->GetResponseCode() == 200 ||
         source->GetResponseCode() == 204)) {
      // For tracking our GetHash false positive (204) rate, compared to real
      // (200) responses.
      if (source->GetResponseCode() == 200)
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_200);
      else
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_204);
      can_cache = true;
      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      SafeBrowsingProtocolParser parser;
      std::string data;
      source->GetResponseAsString(&data);
      parsed_ok = parser.ParseGetHash(
          data.data(),
          static_cast<int>(data.length()),
          &full_hashes);
      if (!parsed_ok) {
        full_hashes.clear();
        // TODO(cbentzel): Should can_cache be set to false here?
      }
    } else {
      HandleGetHashError(Time::Now());
      if (source->GetStatus().status() == net::URLRequestStatus::FAILED) {
        VLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                << " failed with error: " << source->GetStatus().error();
      } else {
        VLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                << " failed with error: " << source->GetResponseCode();
      }
    }

    // Invoke the callback with full_hashes, even if there was a parse error or
    // an error response code (in which case full_hashes will be empty). The
    // caller can't be blocked indefinitely.
    details.callback.Run(full_hashes, can_cache);

    hash_requests_.erase(it);
  } else {
    // Update or chunk response.
    fetcher.reset(request_.release());

    if (request_type_ == UPDATE_REQUEST ||
        request_type_ == BACKUP_UPDATE_REQUEST) {
      if (!fetcher.get()) {
        // We've timed out waiting for an update response, so we've cancelled
        // the update request and scheduled a new one. Ignore this response.
        return;
      }

      // Cancel the update response timeout now that we have the response.
      timeout_timer_.Stop();
    }

    net::URLRequestStatus status = source->GetStatus();
    if (status.is_success() && source->GetResponseCode() == 200) {
      // We have data from the SafeBrowsing service.
      std::string data;
      source->GetResponseAsString(&data);
      parsed_ok = HandleServiceResponse(
          source->GetURL(), data.data(), static_cast<int>(data.length()));
      if (!parsed_ok) {
        VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                << " failed parse.";
        chunk_request_urls_.clear();
        if (request_type_ == UPDATE_REQUEST &&
            IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
          return;
        }
        UpdateFinished(false);
      }

      switch (request_type_) {
        case CHUNK_REQUEST:
          if (parsed_ok) {
            chunk_request_urls_.pop_front();
            if (chunk_request_urls_.empty() && !chunk_pending_to_write_)
              UpdateFinished(true);
          }
          break;
        case UPDATE_REQUEST:
        case BACKUP_UPDATE_REQUEST:
          if (chunk_request_urls_.empty() && parsed_ok) {
            // We are up to date since the servers gave us nothing new, so we
            // are done with this update cycle.
            UpdateFinished(true);
          }
          break;
        case NO_REQUEST:
          // This can happen if HandleServiceResponse fails above.
          break;
        default:
          NOTREACHED();
          break;
      }
    } else {
      if (status.status() == net::URLRequestStatus::FAILED) {
        VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                << " failed with error: " << source->GetStatus().error();
      } else {
        VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                << " failed with error: " << source->GetResponseCode();
      }
      if (request_type_ == CHUNK_REQUEST) {
        // A SafeBrowsing service error, or a very bad response code: back off.
        chunk_request_urls_.clear();
      } else if (request_type_ == UPDATE_REQUEST) {
        BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
        if (status.is_success()) {
          backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
        } else {
          switch (status.error()) {
            case net::ERR_INTERNET_DISCONNECTED:
            case net::ERR_NETWORK_CHANGED:
              backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
              break;
            default:
              backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
              break;
          }
        }
        if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
            IssueBackupUpdateRequest(backup_update_reason)) {
          return;
        }
      }
      UpdateFinished(false);
    }
  }

  // Get the next chunk if available.
  IssueChunkRequest();
}

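// An illustrative update response body (a sketch of the SafeBrowsing v2
// format; protocol_parser.cc owns the authoritative parsing):
//   n:1800
//   i:goog-malware-shavar
//   u:cache.example.com/chunk_1
//   ad:1-3
//   sd:7
// "n:" carries next_update_sec, "u:" lines become chunk_request_urls_, and
// "ad:"/"sd:" lines become the SBChunkDelete entries passed to DeleteChunks().
// A reset is signaled by a dedicated directive that ParseUpdate reports via
// |reset|.
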
bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
                                                        const char* data,
                                                        int length) {
  DCHECK(CalledOnValidThread());
  SafeBrowsingProtocolParser parser;

  switch (request_type_) {
    case UPDATE_REQUEST:
    case BACKUP_UPDATE_REQUEST: {
      int next_update_sec = -1;
      bool reset = false;
      scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
          new std::vector<SBChunkDelete>);
      std::vector<ChunkUrl> chunk_urls;
      if (!parser.ParseUpdate(data, length, &next_update_sec,
                              &reset, chunk_deletes.get(), &chunk_urls)) {
        return false;
      }

      base::TimeDelta next_update_interval =
          base::TimeDelta::FromSeconds(next_update_sec);
      last_update_ = Time::Now();

      if (update_state_ == FIRST_REQUEST)
        update_state_ = SECOND_REQUEST;
      else if (update_state_ == SECOND_REQUEST)
        update_state_ = NORMAL_REQUEST;

      // New time for the next update.
      if (next_update_interval > base::TimeDelta()) {
        next_update_interval_ = next_update_interval;
      } else if (update_state_ == SECOND_REQUEST) {
        next_update_interval_ = base::TimeDelta::FromSeconds(
            base::RandInt(15, 45));
      }

      // New chunks to download.
      if (!chunk_urls.empty()) {
        UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
        for (size_t i = 0; i < chunk_urls.size(); ++i)
          chunk_request_urls_.push_back(chunk_urls[i]);
      }

      // Handle the case where the SafeBrowsing service tells us to dump our
      // database.
      if (reset) {
        delegate_->ResetDatabase();
        return true;
      }

      // Chunks to delete from our storage. Pass ownership of
      // |chunk_deletes|.
      if (!chunk_deletes->empty())
        delegate_->DeleteChunks(chunk_deletes.release());

      break;
    }
    case CHUNK_REQUEST: {
      UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
                          base::Time::Now() - chunk_request_start_);

      const ChunkUrl chunk_url = chunk_request_urls_.front();
      scoped_ptr<SBChunkList> chunks(new SBChunkList);
      UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
      update_size_ += length;
      if (!parser.ParseChunk(chunk_url.list_name, data, length,
                             chunks.get())) {
#ifndef NDEBUG
        std::string data_str;
        data_str.assign(data, length);
        std::string encoded_chunk;
        base::Base64Encode(data_str, &encoded_chunk);
        VLOG(1) << "ParseChunk error for chunk: " << chunk_url.url
                << ", Base64Encode(data): " << encoded_chunk
                << ", length: " << length;
#endif
        return false;
      }

      // Chunks to add to storage. Pass ownership of |chunks|.
      if (!chunks->empty()) {
        chunk_pending_to_write_ = true;
        delegate_->AddChunks(
            chunk_url.list_name, chunks.release(),
            base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete,
                       base::Unretained(this)));
      }

      break;
    }
    default:
      return false;
  }

  return true;
}

void SafeBrowsingProtocolManager::Initialize() {
  DCHECK(CalledOnValidThread());
  // Don't want to hit the safe browsing servers on build/chrome bots.
  scoped_ptr<base::Environment> env(base::Environment::Create());
  if (env->HasVar(env_vars::kHeadless))
    return;
  ScheduleNextUpdate(false /* no back off */);
}

void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
  DCHECK(CalledOnValidThread());
  if (disable_auto_update_) {
    // Unschedule any current timer.
    update_timer_.Stop();
    return;
  }
  // Reschedule with the new update.
  base::TimeDelta next_update_interval = GetNextUpdateInterval(back_off);
  ForceScheduleNextUpdate(next_update_interval);
}

void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
    base::TimeDelta interval) {
  DCHECK(CalledOnValidThread());
  DCHECK(interval >= base::TimeDelta());
  // Unschedule any current timer.
  update_timer_.Stop();
  update_timer_.Start(FROM_HERE, interval, this,
                      &SafeBrowsingProtocolManager::GetNextUpdate);
}

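// For example, a caller that wants the next update to start as soon as
// possible can pass a zero interval (which the DCHECK above allows):
//   protocol_manager->ForceScheduleNextUpdate(base::TimeDelta());
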
// According to section 5 of the SafeBrowsing protocol specification, we must
// back off after a certain number of errors. We only change
// |next_update_interval_| when we receive a response from the SafeBrowsing
// service.
base::TimeDelta SafeBrowsingProtocolManager::GetNextUpdateInterval(
    bool back_off) {
  DCHECK(CalledOnValidThread());
  DCHECK(next_update_interval_ > base::TimeDelta());
  base::TimeDelta next = next_update_interval_;
  if (back_off) {
    next = GetNextBackOffInterval(&update_error_count_, &update_back_off_mult_);
  } else {
    // Successful response means error reset.
    update_error_count_ = 0;
    update_back_off_mult_ = 1;
  }
  return next;
}

base::TimeDelta SafeBrowsingProtocolManager::GetNextBackOffInterval(
    int* error_count, int* multiplier) const {
  DCHECK(CalledOnValidThread());
  DCHECK(multiplier && error_count);
  (*error_count)++;
  if (*error_count > 1 && *error_count < 6) {
    base::TimeDelta next = base::TimeDelta::FromMinutes(
        *multiplier * (1 + back_off_fuzz_) * 30);
    *multiplier *= 2;
    if (*multiplier > kSbMaxBackOff)
      *multiplier = kSbMaxBackOff;
    return next;
  }
  if (*error_count >= 6)
    return base::TimeDelta::FromHours(8);
  return base::TimeDelta::FromMinutes(1);
}

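// Illustrative back-off schedule, assuming back_off_fuzz_ == 0.5 (it is
// randomized per instance in the constructor):
//   error 1:          1 minute
//   errors 2-5:       multiplier * (1 + 0.5) * 30 minutes, with the
//                     multiplier doubling each time: 45, 90, 180, 360 minutes
//   errors 6 and up:  8 hours
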
// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              other hand, this request only occurs every ~20-30 minutes, so
//              there isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  DCHECK(CalledOnValidThread());
  request_type_ = UPDATE_REQUEST;
  delegate_->UpdateStarted();
  delegate_->GetChunks(
      base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete,
                 base::Unretained(this)));
}

// The backup request can run immediately since the chunks have already been
// retrieved from the DB.
bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
    BackupUpdateReason backup_update_reason) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(backup_update_reason >= 0 &&
         backup_update_reason < BACKUP_UPDATE_REASON_MAX);
  if (backup_url_prefixes_[backup_update_reason].empty())
    return false;
  request_type_ = BACKUP_UPDATE_REQUEST;
  backup_update_reason_ = backup_update_reason;

  GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, backup_update_url, net::URLFetcher::POST, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);

  return true;
}

void SafeBrowsingProtocolManager::IssueChunkRequest() {
  DCHECK(CalledOnValidThread());
  // We are only allowed to have one request outstanding at any time. Also,
  // don't get the next url until the previous one has been written to disk so
  // that we don't use too much memory.
  if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
    return;

  ChunkUrl next_chunk = chunk_request_urls_.front();
  DCHECK(!next_chunk.url.empty());
  GURL chunk_url = NextChunkUrl(next_chunk.url);
  request_type_ = CHUNK_REQUEST;
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, chunk_url, net::URLFetcher::GET, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  chunk_request_start_ = base::Time::Now();
  request_->Start();
}

void SafeBrowsingProtocolManager::OnGetChunksComplete(
    const std::vector<SBListChunkRanges>& lists, bool database_error) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(update_list_data_.empty());
  if (database_error) {
    // The update was not successful, but don't back off.
    UpdateFinished(false, false);
    return;
  }

  // Format our stored chunks:
  bool found_malware = false;
  bool found_phishing = false;
  for (size_t i = 0; i < lists.size(); ++i) {
    update_list_data_.append(FormatList(lists[i]));
    if (lists[i].name == safe_browsing_util::kPhishingList)
      found_phishing = true;

    if (lists[i].name == safe_browsing_util::kMalwareList)
      found_malware = true;
  }

  // If we have an empty database, let the server know we want data for these
  // lists.
  if (!found_phishing)
    update_list_data_.append(FormatList(
        SBListChunkRanges(safe_browsing_util::kPhishingList)));

  if (!found_malware)
    update_list_data_.append(FormatList(
        SBListChunkRanges(safe_browsing_util::kMalwareList)));

  // Large requests are (probably) a sign of database corruption.
  // Record stats to inform decisions about whether to automate
  // deletion of such databases. http://crbug.com/120219
  UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_.size());

  GURL update_url = UpdateUrl();
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, update_url, net::URLFetcher::POST, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);
}

// If we haven't heard back from the server with an update response, this method
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
  DCHECK(CalledOnValidThread());
  DCHECK(request_type_ == UPDATE_REQUEST ||
         request_type_ == BACKUP_UPDATE_REQUEST);
  request_.reset();
  if (request_type_ == UPDATE_REQUEST &&
      IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
    return;
  }
  UpdateFinished(false);
}

void SafeBrowsingProtocolManager::OnAddChunksComplete() {
  DCHECK(CalledOnValidThread());
  chunk_pending_to_write_ = false;

  if (chunk_request_urls_.empty()) {
    UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
    UpdateFinished(true);
  } else {
    IssueChunkRequest();
  }
}

// static
std::string SafeBrowsingProtocolManager::FormatList(
    const SBListChunkRanges& list) {
  std::string formatted_results;
  formatted_results.append(list.name);
  formatted_results.append(";");
  if (!list.adds.empty()) {
    formatted_results.append("a:" + list.adds);
    if (!list.subs.empty())
      formatted_results.append(":");
  }
  if (!list.subs.empty()) {
    formatted_results.append("s:" + list.subs);
  }
  formatted_results.append("\n");

  return formatted_results;
}

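// For example (the list name is illustrative), a list with adds "1-5,9" and
// subs "3" serializes as:
//   goog-malware-shavar;a:1-5,9:s:3\n
// With only adds it is "goog-malware-shavar;a:1-5,9\n", and with no chunks at
// all just "goog-malware-shavar;\n".
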
void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
  DCHECK(CalledOnValidThread());
  base::TimeDelta next = GetNextBackOffInterval(
      &gethash_error_count_, &gethash_back_off_mult_);
  next_gethash_time_ = now + next;
}

void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
  UpdateFinished(success, !success);
}

void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
  DCHECK(CalledOnValidThread());
  UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
  update_size_ = 0;
  bool update_success = success || request_type_ == CHUNK_REQUEST;
  if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
    RecordUpdateResult(
        update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
  } else {
    UpdateResult update_result = static_cast<UpdateResult>(
        UPDATE_RESULT_BACKUP_START +
        (static_cast<int>(backup_update_reason_) * 2) +
        update_success);
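    // For example, a failed backup update issued for BACKUP_UPDATE_REASON_HTTP
    // (index 1) maps to UPDATE_RESULT_BACKUP_START + 2 * 1 + 0, i.e.
    // UPDATE_RESULT_BACKUP_HTTP_FAIL; this is why the two enums must keep the
    // same ordering.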
    RecordUpdateResult(update_result);
  }
  backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
  request_type_ = NO_REQUEST;
  update_list_data_.clear();
  delegate_->UpdateFinished(success);
  ScheduleNextUpdate(back_off);
}

GURL SafeBrowsingProtocolManager::UpdateUrl() const {
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      url_prefix_, "downloads", client_name_, version_, additional_query_);
  return GURL(url);
}

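// Illustratively (the exact query-string composition lives in
// SafeBrowsingProtocolManagerHelper::ComposeUrl), the result looks something
// like:
//   <url_prefix_>/downloads?client=<client_name_>&appver=<version_>&pver=...
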
GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
    BackupUpdateReason backup_update_reason) const {
  DCHECK(backup_update_reason >= 0 &&
         backup_update_reason < BACKUP_UPDATE_REASON_MAX);
  DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
      version_, additional_query_);
  return GURL(url);
}

GURL SafeBrowsingProtocolManager::GetHashUrl() const {
  std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
      url_prefix_, "gethash", client_name_, version_, additional_query_);
  return GURL(url);
}

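// NextChunkUrl completes the possibly scheme-less redirect URL received in an
// update response. For example (hypothetical host), "cache.example.com/chunk_1"
// becomes "https://cache.example.com/chunk_1" when url_prefix_ is https, and
// any additional_query_ is appended with "?" or "&" as appropriate.
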
GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
  DCHECK(CalledOnValidThread());
  std::string next_url;
  if (!StartsWithASCII(url, "http://", false) &&
      !StartsWithASCII(url, "https://", false)) {
    // Use https if we updated via https, otherwise http (useful for testing).
    if (StartsWithASCII(url_prefix_, "https://", false))
      next_url.append("https://");
    else
      next_url.append("http://");
    next_url.append(url);
  } else {
    next_url = url;
  }
  if (!additional_query_.empty()) {
    if (next_url.find("?") != std::string::npos) {
      next_url.append("&");
    } else {
      next_url.append("?");
    }
    next_url.append(additional_query_);
  }
  return GURL(next_url);
}

SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
    : callback(),
      is_download(false) {
}

SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
    FullHashCallback callback, bool is_download)
    : callback(callback),
      is_download(is_download) {
}

SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
}

SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {
}