// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/tcp_cubic_sender.h"

#include <algorithm>

#include "base/metrics/histogram.h"
#include "net/quic/congestion_control/prr_sender.h"
#include "net/quic/congestion_control/rtt_stats.h"
#include "net/quic/crypto/crypto_protocol.h"

using std::max;
using std::min;

namespace net {

namespace {
// Constants based on TCP defaults.
// The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a
// fast retransmission. The cwnd after a timeout is still 1.
const QuicPacketCount kMinimumCongestionWindow = 2;
const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS;
const int kMaxBurstLength = 3;
const float kRenoBeta = 0.7f;  // Reno backoff factor.
const uint32 kDefaultNumConnections = 2;  // N-connection emulation.
}  // namespace

TcpCubicSender::TcpCubicSender(const QuicClock* clock,
                               const RttStats* rtt_stats,
                               bool reno,
                               QuicPacketCount initial_tcp_congestion_window,
                               QuicConnectionStats* stats)
    : hybrid_slow_start_(clock),
      cubic_(clock),
      rtt_stats_(rtt_stats),
      stats_(stats),
      reno_(reno),
      num_connections_(kDefaultNumConnections),
      num_acked_packets_(0),
      largest_sent_sequence_number_(0),
      largest_acked_sequence_number_(0),
      largest_sent_at_last_cutback_(0),
      congestion_window_(initial_tcp_congestion_window),
      slowstart_threshold_(std::numeric_limits<uint64>::max()),
      last_cutback_exited_slowstart_(false),
      clock_(clock) {
}

TcpCubicSender::~TcpCubicSender() {
  UMA_HISTOGRAM_COUNTS("Net.QuicSession.FinalTcpCwnd", congestion_window_);
}

void TcpCubicSender::SetFromConfig(const QuicConfig& config,
                                   bool is_server,
                                   bool using_pacing) {
  if (is_server) {
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW10)) {
      // Initial window experiment.
      congestion_window_ = 10;
    }
    if (using_pacing) {
      // Disable the ack train mode in hystart when pacing is enabled, since it
      // may be falsely triggered.
      hybrid_slow_start_.set_ack_train_detection(false);
    }
  }
}

bool TcpCubicSender::ResumeConnectionState(
    const CachedNetworkParameters& cached_network_params) {
  // If the previous bandwidth estimate is less than an hour old, store in
  // preparation for doing bandwidth resumption.
  int64 seconds_since_estimate =
      clock_->WallNow().ToUNIXSeconds() - cached_network_params.timestamp();
  if (seconds_since_estimate > kNumSecondsPerHour) {
    return false;
  }

  QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(
      cached_network_params.bandwidth_estimate_bytes_per_second());
  QuicTime::Delta rtt_ms =
      QuicTime::Delta::FromMilliseconds(cached_network_params.min_rtt_ms());
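
  // The resumed window is essentially the bandwidth-delay product of the
  // cached estimate, expressed in full-sized packets, clamped below.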
  // Make sure CWND is in appropriate range (in case of bad data).
  QuicPacketCount new_congestion_window =
      bandwidth.ToBytesPerPeriod(rtt_ms) / kMaxPacketSize;
  congestion_window_ = max(
      min(new_congestion_window, kMaxCongestionWindowForBandwidthResumption),
      kMinCongestionWindowForBandwidthResumption);

  // TODO(rjshade): Set appropriate CWND when previous connection was in slow
  // start at time of estimate.
  return true;
}

void TcpCubicSender::SetNumEmulatedConnections(int num_connections) {
  num_connections_ = max(1, num_connections);
  cubic_.SetNumConnections(num_connections_);
}

float TcpCubicSender::RenoBeta() const {
  // kNConnectionBeta is the backoff factor after loss for our N-connection
  // emulation, which emulates the effective backoff of an ensemble of N
  // TCP-Reno connections on a single loss event. The effective multiplier is
  // computed as:
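  //   beta_N = (N - 1 + kRenoBeta) / N
  // e.g. with the default of two emulated connections a loss shrinks the
  // window to (2 - 1 + 0.7) / 2 = 0.85 of its previous size, while a single
  // connection falls back to kRenoBeta itself.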
  return (num_connections_ - 1 + kRenoBeta) / num_connections_;
}

void TcpCubicSender::OnCongestionEvent(
    bool rtt_updated,
    QuicByteCount bytes_in_flight,
    const CongestionVector& acked_packets,
    const CongestionVector& lost_packets) {
  if (rtt_updated && InSlowStart() &&
      hybrid_slow_start_.ShouldExitSlowStart(rtt_stats_->latest_rtt(),
                                             rtt_stats_->min_rtt(),
                                             congestion_window_)) {
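    // Exit slow start without waiting for a loss: once the threshold equals
    // the current window, InSlowStart() becomes false.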
    slowstart_threshold_ = congestion_window_;
  }
  for (CongestionVector::const_iterator it = lost_packets.begin();
       it != lost_packets.end(); ++it) {
    OnPacketLost(it->first, bytes_in_flight);
  }
  for (CongestionVector::const_iterator it = acked_packets.begin();
       it != acked_packets.end(); ++it) {
    OnPacketAcked(it->first, it->second.bytes_sent, bytes_in_flight);
  }
}

void TcpCubicSender::OnPacketAcked(
    QuicPacketSequenceNumber acked_sequence_number,
    QuicByteCount acked_bytes,
    QuicByteCount bytes_in_flight) {
  largest_acked_sequence_number_ = max(acked_sequence_number,
                                       largest_acked_sequence_number_);
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketAcked(acked_bytes);
    return;
  }
  MaybeIncreaseCwnd(acked_sequence_number, bytes_in_flight);
  // TODO(ianswett): Should this even be called when not in slow start?
  hybrid_slow_start_.OnPacketAcked(acked_sequence_number, InSlowStart());
}

void TcpCubicSender::OnPacketLost(QuicPacketSequenceNumber sequence_number,
                                  QuicByteCount bytes_in_flight) {
  // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
  // already sent should be treated as a single loss event, since it's expected.
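  // Concretely, only the first loss per window of data triggers a cutback;
  // further losses from packets sent before that cutback are ignored below.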
  if (sequence_number <= largest_sent_at_last_cutback_) {
    if (last_cutback_exited_slowstart_) {
      ++stats_->slowstart_packets_lost;
    }
    DVLOG(1) << "Ignoring loss for largest_missing:" << sequence_number
             << " because it was sent prior to the last CWND cutback.";
    return;
  }
  ++stats_->tcp_loss_events;
  last_cutback_exited_slowstart_ = InSlowStart();
  if (InSlowStart()) {
    ++stats_->slowstart_packets_lost;
  }

  prr_.OnPacketLost(bytes_in_flight);

  if (reno_) {
    congestion_window_ = congestion_window_ * RenoBeta();
  } else {
    congestion_window_ =
        cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
  }
  slowstart_threshold_ = congestion_window_;
  // Enforce TCP's minimum congestion window of 2*MSS.
  if (congestion_window_ < kMinimumCongestionWindow) {
    congestion_window_ = kMinimumCongestionWindow;
  }
  largest_sent_at_last_cutback_ = largest_sent_sequence_number_;
  // Reset the packet count from congestion avoidance mode. We start
  // counting again when we're out of recovery.
  num_acked_packets_ = 0;
  DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
           << " slowstart threshold: " << slowstart_threshold_;
}

bool TcpCubicSender::OnPacketSent(QuicTime /*sent_time*/,
                                  QuicByteCount /*bytes_in_flight*/,
                                  QuicPacketSequenceNumber sequence_number,
                                  QuicByteCount bytes,
                                  HasRetransmittableData is_retransmittable) {
  // Only update bytes_in_flight_ for data packets.
  if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
    return false;
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketSent(bytes);
  }
  DCHECK_LT(largest_sent_sequence_number_, sequence_number);
  largest_sent_sequence_number_ = sequence_number;
  hybrid_slow_start_.OnPacketSent(sequence_number);
  return true;
}

QuicTime::Delta TcpCubicSender::TimeUntilSend(
    QuicTime /* now */,
    QuicByteCount bytes_in_flight,
    HasRetransmittableData has_retransmittable_data) const {
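  // Returns Zero() when a packet may be sent immediately and Infinite() when
  // sending is blocked by the congestion window; during recovery, PRR decides.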
  if (has_retransmittable_data == NO_RETRANSMITTABLE_DATA) {
    // For TCP we can always send an ACK immediately.
    return QuicTime::Delta::Zero();
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    return prr_.TimeUntilSend(GetCongestionWindow(), bytes_in_flight,
                              slowstart_threshold_ * kMaxSegmentSize);
  }
  if (GetCongestionWindow() > bytes_in_flight) {
    return QuicTime::Delta::Zero();
  }
  return QuicTime::Delta::Infinite();
}

QuicBandwidth TcpCubicSender::PacingRate() const {
  // We pace at twice the rate of the underlying sender's bandwidth estimate
  // during slow start and 1.25x during congestion avoidance to ensure pacing
  // doesn't prevent us from filling the window.
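  // The base rate is the current congestion window divided by the smoothed
  // RTT, falling back to the initial RTT before any RTT sample exists.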
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    srtt = QuicTime::Delta::FromMicroseconds(rtt_stats_->initial_rtt_us());
  }
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
  return bandwidth.Scale(InSlowStart() ? 2 : 1.25);
}

QuicBandwidth TcpCubicSender::BandwidthEstimate() const {
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    // If we haven't measured an rtt, the bandwidth estimate is unknown.
    return QuicBandwidth::Zero();
  }
  return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
}

bool TcpCubicSender::HasReliableBandwidthEstimate() const {
  return !InSlowStart() && !InRecovery() &&
         !rtt_stats_->smoothed_rtt().IsZero();
}

QuicTime::Delta TcpCubicSender::RetransmissionDelay() const {
  if (rtt_stats_->smoothed_rtt().IsZero()) {
    return QuicTime::Delta::Zero();
  }
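  // Classic TCP retransmission timeout: smoothed RTT plus four times the mean
  // RTT deviation, in the spirit of RFC 6298.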
  return rtt_stats_->smoothed_rtt().Add(
      rtt_stats_->mean_deviation().Multiply(4));
}

QuicByteCount TcpCubicSender::GetCongestionWindow() const {
  return congestion_window_ * kMaxSegmentSize;
}

bool TcpCubicSender::InSlowStart() const {
  return congestion_window_ < slowstart_threshold_;
}

QuicByteCount TcpCubicSender::GetSlowStartThreshold() const {
  return slowstart_threshold_ * kMaxSegmentSize;
}

bool TcpCubicSender::IsCwndLimited(QuicByteCount bytes_in_flight) const {
  const QuicByteCount congestion_window_bytes = congestion_window_ *
      kMaxSegmentSize;
  if (bytes_in_flight >= congestion_window_bytes) {
    return true;
  }
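  // Even when the window is not full, treat the sender as cwnd-limited if the
  // remaining room is no more than a small burst, or if slow start already has
  // more than half the window in flight.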
  const QuicByteCount max_burst = kMaxBurstLength * kMaxSegmentSize;
  const QuicByteCount available_bytes =
      congestion_window_bytes - bytes_in_flight;
  const bool slow_start_limited = InSlowStart() &&
      bytes_in_flight > congestion_window_bytes / 2;
  return slow_start_limited || available_bytes <= max_burst;
}

bool TcpCubicSender::InRecovery() const {
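  // Recovery lasts until a packet sent after the most recent cutback has been
  // acked; a largest acked of 0 means nothing has been acked yet.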
  return largest_acked_sequence_number_ <= largest_sent_at_last_cutback_ &&
         largest_acked_sequence_number_ != 0;
}

// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but quic has a separate ack for each packet.
void TcpCubicSender::MaybeIncreaseCwnd(
    QuicPacketSequenceNumber acked_sequence_number,
    QuicByteCount bytes_in_flight) {
  LOG_IF(DFATAL, InRecovery()) << "Never increase the CWND during recovery.";
  if (!IsCwndLimited(bytes_in_flight)) {
    // We don't update the congestion window unless we are close to using the
    // window we have available.
    return;
  }
  if (InSlowStart()) {
    // TCP slow start, exponential growth, increase by one for each ACK.
    ++congestion_window_;
    DVLOG(1) << "Slow start; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
    return;
  }
  // Congestion avoidance
  if (reno_) {
    // Classic Reno congestion avoidance.
    ++num_acked_packets_;
    // Divide by num_connections to smoothly increase the CWND at a faster
    // rate than conventional Reno.
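    // The effect is that the window grows by roughly num_connections_
    // segments per round trip instead of one, mimicking N parallel Reno
    // connections.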
    if (num_acked_packets_ * num_connections_ >= congestion_window_) {
      ++congestion_window_;
      num_acked_packets_ = 0;
    }

    DVLOG(1) << "Reno; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_
             << " congestion window count: " << num_acked_packets_;
  } else {
    congestion_window_ = cubic_.CongestionWindowAfterAck(congestion_window_,
                                                         rtt_stats_->min_rtt());
    DVLOG(1) << "Cubic; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
  }
}

void TcpCubicSender::OnRetransmissionTimeout(bool packets_retransmitted) {
  largest_sent_at_last_cutback_ = 0;
  if (!packets_retransmitted) {
    return;
  }
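  // Packets were actually retransmitted on the RTO: restart slow start from
  // the minimum window, with the threshold set to half the pre-timeout window.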
  cubic_.Reset();
  hybrid_slow_start_.Restart();
  slowstart_threshold_ = congestion_window_ / 2;
  congestion_window_ = kMinimumCongestionWindow;
}

CongestionControlType TcpCubicSender::GetCongestionControlType() const {
  return reno_ ? kReno : kCubic;
}

}  // namespace net