net/quic/congestion_control/tcp_cubic_sender.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/tcp_cubic_sender.h"

#include <algorithm>

#include "base/metrics/histogram.h"
#include "net/quic/congestion_control/prr_sender.h"
#include "net/quic/congestion_control/rtt_stats.h"
#include "net/quic/crypto/crypto_protocol.h"
#include "net/quic/proto/cached_network_parameters.pb.h"

using std::max;
using std::min;

namespace net {

namespace {
// Constants based on TCP defaults.
// The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a
// fast retransmission. The cwnd after a timeout is still 1.
const QuicPacketCount kDefaultMinimumCongestionWindow = 2;
const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS;
const int kMaxBurstLength = 3;
const float kRenoBeta = 0.7f;  // Reno backoff factor.
const uint32 kDefaultNumConnections = 2;  // N-connection emulation.
}  // namespace

TcpCubicSender::TcpCubicSender(const QuicClock* clock,
                               const RttStats* rtt_stats,
                               bool reno,
                               QuicPacketCount initial_tcp_congestion_window,
                               QuicPacketCount max_tcp_congestion_window,
                               QuicConnectionStats* stats)
    : hybrid_slow_start_(clock),
      cubic_(clock),
      rtt_stats_(rtt_stats),
      stats_(stats),
      reno_(reno),
      num_connections_(kDefaultNumConnections),
      congestion_window_count_(0),
      largest_sent_sequence_number_(0),
      largest_acked_sequence_number_(0),
      largest_sent_at_last_cutback_(0),
      congestion_window_(initial_tcp_congestion_window),
      min_congestion_window_(kDefaultMinimumCongestionWindow),
      min4_mode_(false),
      slowstart_threshold_(max_tcp_congestion_window),
      last_cutback_exited_slowstart_(false),
      max_tcp_congestion_window_(max_tcp_congestion_window),
      clock_(clock) {
}

TcpCubicSender::~TcpCubicSender() {
  UMA_HISTOGRAM_COUNTS("Net.QuicSession.FinalTcpCwnd", congestion_window_);
}

void TcpCubicSender::SetFromConfig(const QuicConfig& config,
                                   Perspective perspective) {
  if (perspective == Perspective::IS_SERVER) {
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW10)) {
      // Initial window experiment.
      congestion_window_ = 10;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN1)) {
      // Min CWND experiment.
      min_congestion_window_ = 1;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN4)) {
      // Min CWND of 4 experiment.
      min4_mode_ = true;
      min_congestion_window_ = 1;
    }
  }
}

bool TcpCubicSender::ResumeConnectionState(
    const CachedNetworkParameters& cached_network_params,
    bool max_bandwidth_resumption) {
  // If the previous bandwidth estimate is less than an hour old, store in
  // preparation for doing bandwidth resumption.
  int64 seconds_since_estimate =
      clock_->WallNow().ToUNIXSeconds() - cached_network_params.timestamp();
  if (seconds_since_estimate > kNumSecondsPerHour) {
    return false;
  }

  QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(
      max_bandwidth_resumption
          ? cached_network_params.max_bandwidth_estimate_bytes_per_second()
          : cached_network_params.bandwidth_estimate_bytes_per_second());
  QuicTime::Delta rtt_ms =
      QuicTime::Delta::FromMilliseconds(cached_network_params.min_rtt_ms());
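
  // The resumed window below is the bandwidth-delay product expressed in
  // max-size packets; e.g. a 2 MB/s estimate over a 100 ms min RTT is a
  // ~200 kB product, or roughly 140 packets (illustrative figures).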
  // Make sure CWND is in appropriate range (in case of bad data).
  QuicPacketCount new_congestion_window =
      bandwidth.ToBytesPerPeriod(rtt_ms) / kMaxPacketSize;
  congestion_window_ = max(min(new_congestion_window, kMaxTcpCongestionWindow),
                           kMinCongestionWindowForBandwidthResumption);

  // TODO(rjshade): Set appropriate CWND when previous connection was in slow
  // start at time of estimate.
  return true;
}

void TcpCubicSender::SetNumEmulatedConnections(int num_connections) {
  num_connections_ = max(1, num_connections);
  cubic_.SetNumConnections(num_connections_);
}

void TcpCubicSender::SetMaxCongestionWindow(
    QuicByteCount max_congestion_window) {
  max_tcp_congestion_window_ = max_congestion_window / kMaxPacketSize;
}

float TcpCubicSender::RenoBeta() const {
  // kNConnectionBeta is the backoff factor after loss for our N-connection
  // emulation, which emulates the effective backoff of an ensemble of N
  // TCP-Reno connections on a single loss event. The effective multiplier is
  // computed as:
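  //   (num_connections_ - 1 + kRenoBeta) / num_connections_
  // e.g. with the default of 2 emulated connections and kRenoBeta = 0.7, the
  // window is multiplied by (2 - 1 + 0.7) / 2 = 0.85 on a loss event.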
  return (num_connections_ - 1 + kRenoBeta) / num_connections_;
}

void TcpCubicSender::OnCongestionEvent(
    bool rtt_updated,
    QuicByteCount bytes_in_flight,
    const CongestionVector& acked_packets,
    const CongestionVector& lost_packets) {
  if (rtt_updated && InSlowStart() &&
      hybrid_slow_start_.ShouldExitSlowStart(rtt_stats_->latest_rtt(),
                                             rtt_stats_->min_rtt(),
                                             congestion_window_)) {
    slowstart_threshold_ = congestion_window_;
  }
  for (CongestionVector::const_iterator it = lost_packets.begin();
       it != lost_packets.end(); ++it) {
    OnPacketLost(it->first, bytes_in_flight);
  }
  for (CongestionVector::const_iterator it = acked_packets.begin();
       it != acked_packets.end(); ++it) {
    OnPacketAcked(it->first, it->second.bytes_sent, bytes_in_flight);
  }
}

void TcpCubicSender::OnPacketAcked(
    QuicPacketSequenceNumber acked_sequence_number,
    QuicByteCount acked_bytes,
    QuicByteCount bytes_in_flight) {
  largest_acked_sequence_number_ = max(acked_sequence_number,
                                       largest_acked_sequence_number_);
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketAcked(acked_bytes);
    return;
  }
  MaybeIncreaseCwnd(acked_sequence_number, bytes_in_flight);
  // TODO(ianswett): Should this even be called when not in slow start?
  hybrid_slow_start_.OnPacketAcked(acked_sequence_number, InSlowStart());
}

void TcpCubicSender::OnPacketLost(QuicPacketSequenceNumber sequence_number,
                                  QuicByteCount bytes_in_flight) {
  // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
  // already sent should be treated as a single loss event, since it's expected.
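  // e.g. if the loss that triggered the last cutback happened while packets
  // up to sequence number 20 were in flight, further losses among packets
  // 1-20 do not shrink the window again; they are ignored below.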
  if (sequence_number <= largest_sent_at_last_cutback_) {
    if (last_cutback_exited_slowstart_) {
      ++stats_->slowstart_packets_lost;
    }
    DVLOG(1) << "Ignoring loss for largest_missing:" << sequence_number
             << " because it was sent prior to the last CWND cutback.";
    return;
  }
  ++stats_->tcp_loss_events;
  last_cutback_exited_slowstart_ = InSlowStart();
  if (InSlowStart()) {
    ++stats_->slowstart_packets_lost;
  }

  prr_.OnPacketLost(bytes_in_flight);

  if (reno_) {
    congestion_window_ = congestion_window_ * RenoBeta();
  } else {
    congestion_window_ =
        cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
  }
  slowstart_threshold_ = congestion_window_;
  // Enforce a minimum congestion window.
  if (congestion_window_ < min_congestion_window_) {
    congestion_window_ = min_congestion_window_;
  }
  largest_sent_at_last_cutback_ = largest_sent_sequence_number_;
  // Reset the packet count from congestion avoidance mode. We start counting
  // again when we're out of recovery.
  congestion_window_count_ = 0;
  DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
           << " slowstart threshold: " << slowstart_threshold_;
}

bool TcpCubicSender::OnPacketSent(QuicTime /*sent_time*/,
                                  QuicByteCount /*bytes_in_flight*/,
                                  QuicPacketSequenceNumber sequence_number,
                                  QuicByteCount bytes,
                                  HasRetransmittableData is_retransmittable) {
  if (InSlowStart()) {
    ++(stats_->slowstart_packets_sent);
  }

  // Only update bytes_in_flight_ for data packets.
  if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
    return false;
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketSent(bytes);
  }
  DCHECK_LT(largest_sent_sequence_number_, sequence_number);
  largest_sent_sequence_number_ = sequence_number;
  hybrid_slow_start_.OnPacketSent(sequence_number);
  return true;
}

QuicTime::Delta TcpCubicSender::TimeUntilSend(
    QuicTime /* now */,
    QuicByteCount bytes_in_flight,
    HasRetransmittableData has_retransmittable_data) const {
  if (has_retransmittable_data == NO_RETRANSMITTABLE_DATA) {
    // For TCP we can always send an ACK immediately.
    return QuicTime::Delta::Zero();
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    return prr_.TimeUntilSend(GetCongestionWindow(), bytes_in_flight,
                              slowstart_threshold_ * kMaxSegmentSize);
  }
  if (GetCongestionWindow() > bytes_in_flight) {
    return QuicTime::Delta::Zero();
  }
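  // In the min CWND of 4 experiment, always allow up to four packets in
  // flight even when the congestion window is smaller than that.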
  if (min4_mode_ && bytes_in_flight < 4 * kMaxSegmentSize) {
    return QuicTime::Delta::Zero();
  }
  return QuicTime::Delta::Infinite();
}

QuicBandwidth TcpCubicSender::PacingRate() const {
  // We pace at twice the rate of the underlying sender's bandwidth estimate
  // during slow start and 1.25x during congestion avoidance to ensure pacing
  // doesn't prevent us from filling the window.
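  // e.g. a 32 kB window over a 20 ms SRTT is a 1.6 MB/s estimate, paced at
  // 3.2 MB/s in slow start and 2 MB/s afterwards (illustrative figures).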
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    srtt = QuicTime::Delta::FromMicroseconds(rtt_stats_->initial_rtt_us());
  }
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
  return bandwidth.Scale(InSlowStart() ? 2 : 1.25);
}

QuicBandwidth TcpCubicSender::BandwidthEstimate() const {
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    // If we haven't measured an rtt, the bandwidth estimate is unknown.
    return QuicBandwidth::Zero();
  }
  return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
}

bool TcpCubicSender::HasReliableBandwidthEstimate() const {
  return !InSlowStart() && !InRecovery() &&
         !rtt_stats_->smoothed_rtt().IsZero();
}

QuicTime::Delta TcpCubicSender::RetransmissionDelay() const {
  if (rtt_stats_->smoothed_rtt().IsZero()) {
    return QuicTime::Delta::Zero();
  }
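  // Standard TCP-style retransmission delay: smoothed RTT plus four times the
  // mean deviation, e.g. 100 ms SRTT + 4 * 10 ms = 140 ms.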
  return rtt_stats_->smoothed_rtt().Add(
      rtt_stats_->mean_deviation().Multiply(4));
}

QuicByteCount TcpCubicSender::GetCongestionWindow() const {
  return congestion_window_ * kMaxSegmentSize;
}

bool TcpCubicSender::InSlowStart() const {
  return congestion_window_ < slowstart_threshold_;
}

QuicByteCount TcpCubicSender::GetSlowStartThreshold() const {
  return slowstart_threshold_ * kMaxSegmentSize;
}

bool TcpCubicSender::IsCwndLimited(QuicByteCount bytes_in_flight) const {
  const QuicByteCount congestion_window_bytes = congestion_window_ *
      kMaxSegmentSize;
  if (bytes_in_flight >= congestion_window_bytes) {
    return true;
  }
  const QuicByteCount max_burst = kMaxBurstLength * kMaxSegmentSize;
  const QuicByteCount available_bytes =
      congestion_window_bytes - bytes_in_flight;
  const bool slow_start_limited = InSlowStart() &&
      bytes_in_flight > congestion_window_bytes / 2;
  return slow_start_limited || available_bytes <= max_burst;
}

bool TcpCubicSender::InRecovery() const {
  return largest_acked_sequence_number_ <= largest_sent_at_last_cutback_ &&
         largest_acked_sequence_number_ != 0;
}

// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but quic has a separate ack for each packet.
void TcpCubicSender::MaybeIncreaseCwnd(
    QuicPacketSequenceNumber acked_sequence_number,
    QuicByteCount bytes_in_flight) {
  LOG_IF(DFATAL, InRecovery()) << "Never increase the CWND during recovery.";
  if (!IsCwndLimited(bytes_in_flight)) {
    // We don't update the congestion window unless we are close to using the
    // window we have available.
    return;
  }
  if (InSlowStart()) {
    // congestion_window_cnt is the number of acks since last change of snd_cwnd
    if (congestion_window_ < max_tcp_congestion_window_) {
      // TCP slow start, exponential growth, increase by one for each ACK.
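      // Since every acked packet adds one, the window roughly doubles each
      // round trip.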
      ++congestion_window_;
    }
    DVLOG(1) << "Slow start; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
    return;
  }
  if (congestion_window_ >= max_tcp_congestion_window_) {
    return;
  }
  // Congestion avoidance
  if (reno_) {
    // Classic Reno congestion avoidance.
    ++congestion_window_count_;
    // Divide by num_connections to smoothly increase the CWND at a faster
    // rate than conventional Reno.
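    // e.g. with 2 emulated connections and a window of 20 packets, the window
    // grows by one after every 10 acks, i.e. about twice per round trip.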
    if (congestion_window_count_ * num_connections_ >= congestion_window_) {
      ++congestion_window_;
      congestion_window_count_ = 0;
    }

    DVLOG(1) << "Reno; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_
             << " congestion window count: " << congestion_window_count_;
  } else {
    congestion_window_ = min(max_tcp_congestion_window_,
                             cubic_.CongestionWindowAfterAck(
                                 congestion_window_, rtt_stats_->min_rtt()));
    DVLOG(1) << "Cubic; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
  }
}

void TcpCubicSender::OnRetransmissionTimeout(bool packets_retransmitted) {
  largest_sent_at_last_cutback_ = 0;
  if (!packets_retransmitted) {
    return;
  }
  cubic_.Reset();
  hybrid_slow_start_.Restart();
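  // After a timeout, halve the slow start threshold and restart from the
  // minimum congestion window.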
  slowstart_threshold_ = congestion_window_ / 2;
  congestion_window_ = min_congestion_window_;
}

CongestionControlType TcpCubicSender::GetCongestionControlType() const {
  return reno_ ? kReno : kCubic;
}

}  // namespace net