components/rappor/log_uploader.cc
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/rappor/log_uploader.h"

#include "base/metrics/histogram_macros.h"
#include "base/metrics/sparse_histogram.h"
#include "components/data_use_measurement/core/data_use_user_data.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/url_request/url_fetcher.h"

namespace {

// The delay, in seconds, between uploading when there are queued logs to send.
const int kUnsentLogsIntervalSeconds = 3;

// When uploading metrics to the server fails, we progressively wait longer and
// longer before sending the next log. This backoff process helps reduce load
// on a server that is having issues.
// The following is the multiplier we use to expand that inter-log duration.
const double kBackoffMultiplier = 1.1;

// The maximum backoff interval, in seconds.
const int kMaxBackoffIntervalSeconds = 60 * 60;
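
// For example: starting from the default 3 second interval, successive failed
// uploads back off to roughly 3.3s, 3.63s, 3.99s, and so on, until the delay
// is capped at one hour.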

// The maximum number of unsent logs we will keep.
// TODO(holte): Limit based on log size instead.
const size_t kMaxQueuedLogs = 10;

enum DiscardReason {
  UPLOAD_SUCCESS,
  UPLOAD_REJECTED,
  QUEUE_OVERFLOW,
  NUM_DISCARD_REASONS
};

void RecordDiscardReason(DiscardReason reason) {
  UMA_HISTOGRAM_ENUMERATION("Rappor.DiscardReason",
                            reason,
                            NUM_DISCARD_REASONS);
}

}  // namespace

namespace rappor {

LogUploader::LogUploader(const GURL& server_url,
                         const std::string& mime_type,
                         net::URLRequestContextGetter* request_context)
    : server_url_(server_url),
      mime_type_(mime_type),
      request_context_(request_context),
      is_running_(false),
      has_callback_pending_(false),
      upload_interval_(base::TimeDelta::FromSeconds(
          kUnsentLogsIntervalSeconds)) {
}

LogUploader::~LogUploader() {}

void LogUploader::Start() {
  is_running_ = true;
  StartScheduledUpload();
}

void LogUploader::Stop() {
  is_running_ = false;
  // Rather than interrupting the current upload, just let it finish/fail and
  // then inhibit any retry attempts.
}

void LogUploader::QueueLog(const std::string& log) {
  queued_logs_.push(log);
  // Don't drop logs yet if an upload is in progress. They will be dropped
  // when it finishes.
  if (!has_callback_pending_)
    DropExcessLogs();
  StartScheduledUpload();
}

void LogUploader::DropExcessLogs() {
  while (queued_logs_.size() > kMaxQueuedLogs) {
    DVLOG(2) << "Dropping excess log.";
    RecordDiscardReason(QUEUE_OVERFLOW);
    queued_logs_.pop();
  }
}

bool LogUploader::IsUploadScheduled() const {
  return upload_timer_.IsRunning();
}

void LogUploader::ScheduleNextUpload(base::TimeDelta interval) {
  upload_timer_.Start(
      FROM_HERE, interval, this, &LogUploader::StartScheduledUpload);
}

bool LogUploader::CanStartUpload() const {
  return is_running_ &&
         !queued_logs_.empty() &&
         !IsUploadScheduled() &&
         !has_callback_pending_;
}

void LogUploader::StartScheduledUpload() {
  if (!CanStartUpload())
    return;
  DVLOG(2) << "Upload to " << server_url_.spec() << " starting.";
  has_callback_pending_ = true;
  current_fetch_ =
      net::URLFetcher::Create(server_url_, net::URLFetcher::POST, this);
  data_use_measurement::DataUseUserData::AttachToFetcher(
      current_fetch_.get(), data_use_measurement::DataUseUserData::RAPPOR);
  current_fetch_->SetRequestContext(request_context_.get());
  current_fetch_->SetUploadData(mime_type_, queued_logs_.front());

  // We already drop cookies server-side, but we might as well strip them out
  // client-side as well.
  current_fetch_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                               net::LOAD_DO_NOT_SEND_COOKIES);
  current_fetch_->Start();
}

// static
base::TimeDelta LogUploader::BackOffUploadInterval(base::TimeDelta interval) {
  DCHECK_GT(kBackoffMultiplier, 1.0);
  interval = base::TimeDelta::FromMicroseconds(static_cast<int64>(
      kBackoffMultiplier * interval.InMicroseconds()));

  base::TimeDelta max_interval =
      base::TimeDelta::FromSeconds(kMaxBackoffIntervalSeconds);
  return interval > max_interval ? max_interval : interval;
}

void LogUploader::OnURLFetchComplete(const net::URLFetcher* source) {
  // We're not allowed to re-use the existing |URLFetcher|s, so free them here.
  // Note however that |source| is aliased to the fetcher, so we should be
  // careful not to delete it too early.
  DCHECK_EQ(current_fetch_.get(), source);
  scoped_ptr<net::URLFetcher> fetch(current_fetch_.Pass());

  const net::URLRequestStatus& request_status = source->GetStatus();

  const int response_code = source->GetResponseCode();
  DVLOG(2) << "Upload fetch complete response code: " << response_code;

  if (request_status.status() != net::URLRequestStatus::SUCCESS) {
    UMA_HISTOGRAM_SPARSE_SLOWLY("Rappor.FailedUploadErrorCode",
                                -request_status.error());
    DVLOG(1) << "Rappor server upload failed with error: "
             << request_status.error() << ": "
             << net::ErrorToString(request_status.error());
    DCHECK_EQ(-1, response_code);
  } else {
    // Log a histogram to track response success vs. failure rates.
    UMA_HISTOGRAM_SPARSE_SLOWLY("Rappor.UploadResponseCode", response_code);
  }

  const bool upload_succeeded = response_code == 200;
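
  // Note: a response other than 200 or 400 leaves the log in the queue so it
  // can be retried on a later upload attempt.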

  // Determine whether this log should be retransmitted.
  DiscardReason reason = NUM_DISCARD_REASONS;
  if (upload_succeeded) {
    reason = UPLOAD_SUCCESS;
  } else if (response_code == 400) {
    reason = UPLOAD_REJECTED;
  }

  if (reason != NUM_DISCARD_REASONS) {
    DVLOG(2) << "Log discarded.";
    RecordDiscardReason(reason);
    queued_logs_.pop();
  }

  DropExcessLogs();

  // Error 400 indicates a problem with the log, not with the server, so
  // don't consider that a sign that the server is in trouble.
  const bool server_is_healthy = upload_succeeded || response_code == 400;
  OnUploadFinished(server_is_healthy);
}

void LogUploader::OnUploadFinished(bool server_is_healthy) {
  DCHECK(has_callback_pending_);
  has_callback_pending_ = false;
  // If the server is having issues, back off. Otherwise, reset to default.
  if (!server_is_healthy)
    upload_interval_ = BackOffUploadInterval(upload_interval_);
  else
    upload_interval_ = base::TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);

  if (CanStartUpload())
    ScheduleNextUpload(upload_interval_);
}

}  // namespace rappor
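
// ---------------------------------------------------------------------------
// Illustrative usage sketch. The server URL, MIME type, and |request_context|
// (a net::URLRequestContextGetter*) below are hypothetical placeholders, not
// values taken from this file; in Chromium the owning rappor service supplies
// the real values when it constructs the uploader.
//
//   rappor::LogUploader uploader(GURL("https://example.invalid/rappor"),
//                                "application/octet-stream",
//                                request_context);
//   uploader.Start();                // Permit scheduled uploads to run.
//   uploader.QueueLog(encoded_log);  // Enqueue a serialized report; an upload
//                                    // is attempted immediately if none is
//                                    // pending or scheduled.
//   uploader.Stop();                 // Let any in-flight upload finish, but
//                                    // inhibit further retries.
// ---------------------------------------------------------------------------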