net/quic/congestion_control/tcp_cubic_sender.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/tcp_cubic_sender.h"

#include <algorithm>

#include "base/metrics/histogram_macros.h"
#include "net/quic/congestion_control/prr_sender.h"
#include "net/quic/congestion_control/rtt_stats.h"
#include "net/quic/crypto/crypto_protocol.h"
#include "net/quic/proto/cached_network_parameters.pb.h"
#include "net/quic/quic_flags.h"

using std::max;
using std::min;

namespace net {

namespace {
// Constants based on TCP defaults.
// The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a
// fast retransmission. The cwnd after a timeout is still 1.
const QuicPacketCount kDefaultMinimumCongestionWindow = 2;
const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS;
const QuicByteCount kMaxBurstBytes = 3 * kMaxSegmentSize;
const float kRenoBeta = 0.7f;             // Reno backoff factor.
const uint32 kDefaultNumConnections = 2;  // N-connection emulation.
}  // namespace

TcpCubicSender::TcpCubicSender(const QuicClock* clock,
                               const RttStats* rtt_stats,
                               bool reno,
                               QuicPacketCount initial_tcp_congestion_window,
                               QuicPacketCount max_tcp_congestion_window,
                               QuicConnectionStats* stats)
    : cubic_(clock),
      rtt_stats_(rtt_stats),
      stats_(stats),
      reno_(reno),
      num_connections_(kDefaultNumConnections),
      congestion_window_count_(0),
      largest_sent_packet_number_(0),
      largest_acked_packet_number_(0),
      largest_sent_at_last_cutback_(0),
      congestion_window_(initial_tcp_congestion_window),
      min_congestion_window_(kDefaultMinimumCongestionWindow),
      min4_mode_(false),
      slowstart_threshold_(max_tcp_congestion_window),
      last_cutback_exited_slowstart_(false),
      max_tcp_congestion_window_(max_tcp_congestion_window),
      clock_(clock) {}

TcpCubicSender::~TcpCubicSender() {
  UMA_HISTOGRAM_COUNTS("Net.QuicSession.FinalTcpCwnd", congestion_window_);
}
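
// Applies connection options negotiated during the handshake; the initial
// and minimum window experiments below are only honored on the server side.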
void TcpCubicSender::SetFromConfig(const QuicConfig& config,
                                   Perspective perspective) {
  if (perspective == Perspective::IS_SERVER) {
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW03)) {
      // Initial window experiment.
      congestion_window_ = 3;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW10)) {
      // Initial window experiment.
      congestion_window_ = 10;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW20)) {
      // Initial window experiment.
      congestion_window_ = 20;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW50)) {
      // Initial window experiment.
      congestion_window_ = 50;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN1)) {
      // Min CWND experiment.
      min_congestion_window_ = 1;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN4)) {
      // Min CWND of 4 experiment.
      min4_mode_ = true;
      min_congestion_window_ = 1;
    }
  }
}
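
// Seeds the congestion window from a cached bandwidth and min RTT estimate:
// the window is set to the implied bandwidth-delay product in full-size
// packets, clamped to a sane range in case the cached data is bad.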
void TcpCubicSender::ResumeConnectionState(
    const CachedNetworkParameters& cached_network_params,
    bool max_bandwidth_resumption) {
  QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(
      max_bandwidth_resumption
          ? cached_network_params.max_bandwidth_estimate_bytes_per_second()
          : cached_network_params.bandwidth_estimate_bytes_per_second());
  QuicTime::Delta rtt_ms =
      QuicTime::Delta::FromMilliseconds(cached_network_params.min_rtt_ms());

  // Make sure CWND is in appropriate range (in case of bad data).
  QuicPacketCount new_congestion_window =
      bandwidth.ToBytesPerPeriod(rtt_ms) / kMaxPacketSize;
  congestion_window_ = max(min(new_congestion_window, kMaxCongestionWindow),
                           kMinCongestionWindowForBandwidthResumption);
}

void TcpCubicSender::SetNumEmulatedConnections(int num_connections) {
  num_connections_ = max(1, num_connections);
  cubic_.SetNumConnections(num_connections_);
}

void TcpCubicSender::SetMaxCongestionWindow(
    QuicByteCount max_congestion_window) {
  max_tcp_congestion_window_ = max_congestion_window / kMaxPacketSize;
}

float TcpCubicSender::RenoBeta() const {
  // kNConnectionBeta is the backoff factor after loss for our N-connection
  // emulation, which emulates the effective backoff of an ensemble of N
  // TCP-Reno connections on a single loss event. The effective multiplier is
  // computed as:
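  //   (N - 1 + kRenoBeta) / N.
  // With the default two emulated connections this gives (2 - 1 + 0.7) / 2 =
  // 0.85, a gentler backoff than the 0.7 a single Reno connection would use.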
  return (num_connections_ - 1 + kRenoBeta) / num_connections_;
}
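
// Handles one ack event: losses are processed before acks, and a fresh RTT
// sample may end slow start early via hybrid slow start.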
void TcpCubicSender::OnCongestionEvent(
    bool rtt_updated,
    QuicByteCount bytes_in_flight,
    const CongestionVector& acked_packets,
    const CongestionVector& lost_packets) {
  if (rtt_updated && InSlowStart() &&
      hybrid_slow_start_.ShouldExitSlowStart(rtt_stats_->latest_rtt(),
                                             rtt_stats_->min_rtt(),
                                             congestion_window_)) {
    slowstart_threshold_ = congestion_window_;
  }
  for (CongestionVector::const_iterator it = lost_packets.begin();
       it != lost_packets.end(); ++it) {
    OnPacketLost(it->first, bytes_in_flight);
  }
  for (CongestionVector::const_iterator it = acked_packets.begin();
       it != acked_packets.end(); ++it) {
    OnPacketAcked(it->first, it->second.bytes_sent, bytes_in_flight);
  }
}

void TcpCubicSender::OnPacketAcked(QuicPacketNumber acked_packet_number,
                                   QuicByteCount acked_bytes,
                                   QuicByteCount bytes_in_flight) {
  largest_acked_packet_number_ =
      max(acked_packet_number, largest_acked_packet_number_);
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketAcked(acked_bytes);
    return;
  }
  MaybeIncreaseCwnd(acked_packet_number, bytes_in_flight);
  // TODO(ianswett): Should this even be called when not in slow start?
  hybrid_slow_start_.OnPacketAcked(acked_packet_number, InSlowStart());
}

void TcpCubicSender::OnPacketLost(QuicPacketNumber packet_number,
                                  QuicByteCount bytes_in_flight) {
  // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
  // already sent should be treated as a single loss event, since it's
  // expected.
  if (packet_number <= largest_sent_at_last_cutback_) {
    if (last_cutback_exited_slowstart_) {
      ++stats_->slowstart_packets_lost;
    }
    DVLOG(1) << "Ignoring loss for largest_missing:" << packet_number
             << " because it was sent prior to the last CWND cutback.";
    return;
  }
  ++stats_->tcp_loss_events;
  last_cutback_exited_slowstart_ = InSlowStart();
  if (InSlowStart()) {
    ++stats_->slowstart_packets_lost;
  }

  prr_.OnPacketLost(bytes_in_flight);

  if (reno_) {
    congestion_window_ = congestion_window_ * RenoBeta();
  } else {
    congestion_window_ =
        cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
  }
  slowstart_threshold_ = congestion_window_;
  // Enforce a minimum congestion window.
  if (congestion_window_ < min_congestion_window_) {
    congestion_window_ = min_congestion_window_;
  }
  largest_sent_at_last_cutback_ = largest_sent_packet_number_;
  // Reset packet count from congestion avoidance mode. We start counting
  // again when we're out of recovery.
  congestion_window_count_ = 0;
  DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
           << " slowstart threshold: " << slowstart_threshold_;
}

bool TcpCubicSender::OnPacketSent(QuicTime /*sent_time*/,
                                  QuicByteCount /*bytes_in_flight*/,
                                  QuicPacketNumber packet_number,
                                  QuicByteCount bytes,
                                  HasRetransmittableData is_retransmittable) {
  if (InSlowStart()) {
    ++(stats_->slowstart_packets_sent);
  }

  // Only update bytes_in_flight_ for data packets.
  if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
    return false;
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketSent(bytes);
  }
  DCHECK_LT(largest_sent_packet_number_, packet_number);
  largest_sent_packet_number_ = packet_number;
  hybrid_slow_start_.OnPacketSent(packet_number);
  return true;
}
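
// Returns Zero when a packet may be sent immediately; during recovery the
// decision is delegated to PRR, and otherwise sending is blocked (Infinite)
// once the congestion window is full.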
QuicTime::Delta TcpCubicSender::TimeUntilSend(
    QuicTime /* now */,
    QuicByteCount bytes_in_flight,
    HasRetransmittableData has_retransmittable_data) const {
  if (has_retransmittable_data == NO_RETRANSMITTABLE_DATA) {
    // For TCP we can always send an ACK immediately.
    return QuicTime::Delta::Zero();
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    return prr_.TimeUntilSend(GetCongestionWindow(), bytes_in_flight,
                              slowstart_threshold_ * kMaxSegmentSize);
  }
  if (GetCongestionWindow() > bytes_in_flight) {
    return QuicTime::Delta::Zero();
  }
  if (min4_mode_ && bytes_in_flight < 4 * kMaxSegmentSize) {
    return QuicTime::Delta::Zero();
  }
  return QuicTime::Delta::Infinite();
}

QuicBandwidth TcpCubicSender::PacingRate() const {
  // We pace at twice the rate of the underlying sender's bandwidth estimate
  // during slow start and 1.25x during congestion avoidance to ensure pacing
  // doesn't prevent us from filling the window.
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    srtt = QuicTime::Delta::FromMicroseconds(rtt_stats_->initial_rtt_us());
  }
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
  return bandwidth.Scale(InSlowStart() ? 2 : 1.25);
}

QuicBandwidth TcpCubicSender::BandwidthEstimate() const {
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    // If we haven't measured an rtt, the bandwidth estimate is unknown.
    return QuicBandwidth::Zero();
  }
  return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
}
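
// Standard TCP-style retransmission timeout: smoothed RTT plus four times the
// mean deviation (cf. RFC 6298), or Zero if no RTT sample exists yet.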
QuicTime::Delta TcpCubicSender::RetransmissionDelay() const {
  if (rtt_stats_->smoothed_rtt().IsZero()) {
    return QuicTime::Delta::Zero();
  }
  return rtt_stats_->smoothed_rtt().Add(
      rtt_stats_->mean_deviation().Multiply(4));
}
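
// congestion_window_ and slowstart_threshold_ are maintained in packets;
// GetCongestionWindow() and GetSlowStartThreshold() convert them to bytes
// using the maximum segment size.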
QuicByteCount TcpCubicSender::GetCongestionWindow() const {
  return congestion_window_ * kMaxSegmentSize;
}

bool TcpCubicSender::InSlowStart() const {
  return congestion_window_ < slowstart_threshold_;
}

QuicByteCount TcpCubicSender::GetSlowStartThreshold() const {
  return slowstart_threshold_ * kMaxSegmentSize;
}
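
// The sender is considered window-limited when the flight fills the window,
// when at most kMaxBurstBytes (3 MSS) of the window remains unused, or when
// slow start already has more than half the window in flight.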
bool TcpCubicSender::IsCwndLimited(QuicByteCount bytes_in_flight) const {
  const QuicByteCount congestion_window_bytes = GetCongestionWindow();
  if (bytes_in_flight >= congestion_window_bytes) {
    return true;
  }
  const QuicByteCount available_bytes =
      congestion_window_bytes - bytes_in_flight;
  const bool slow_start_limited =
      InSlowStart() && bytes_in_flight > congestion_window_bytes / 2;
  return slow_start_limited || available_bytes <= kMaxBurstBytes;
}
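
// Recovery lasts until a packet sent after the most recent window cutback has
// been acked.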
bool TcpCubicSender::InRecovery() const {
  return largest_acked_packet_number_ <= largest_sent_at_last_cutback_ &&
         largest_acked_packet_number_ != 0;
}

// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but quic has a separate ack for each packet.
void TcpCubicSender::MaybeIncreaseCwnd(QuicPacketNumber acked_packet_number,
                                       QuicByteCount bytes_in_flight) {
  LOG_IF(DFATAL, InRecovery()) << "Never increase the CWND during recovery.";
  if (!IsCwndLimited(bytes_in_flight)) {
    // Do not increase the congestion window unless the sender is close to
    // using the current window.
    if (FLAGS_reset_cubic_epoch_when_app_limited) {
      cubic_.OnApplicationLimited();
    }
    return;
  }
  if (congestion_window_ >= max_tcp_congestion_window_) {
    return;
  }
  if (InSlowStart()) {
    // TCP slow start, exponential growth, increase by one for each ACK.
    ++congestion_window_;
    DVLOG(1) << "Slow start; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
    return;
  }
  // Congestion avoidance.
  if (reno_) {
    // Classic Reno congestion avoidance.
    ++congestion_window_count_;
    // Divide by num_connections to smoothly increase the CWND at a faster
    // rate than conventional Reno.
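    // Incrementing once every congestion_window_ / num_connections_ acks
    // grows the window by roughly num_connections_ packets per round trip.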
    if (congestion_window_count_ * num_connections_ >= congestion_window_) {
      ++congestion_window_;
      congestion_window_count_ = 0;
    }

    DVLOG(1) << "Reno; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_
             << " congestion window count: " << congestion_window_count_;
  } else {
    congestion_window_ = min(max_tcp_congestion_window_,
                             cubic_.CongestionWindowAfterAck(
                                 congestion_window_, rtt_stats_->min_rtt()));
    DVLOG(1) << "Cubic; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
  }
}
void TcpCubicSender::OnRetransmissionTimeout(bool packets_retransmitted) {
  largest_sent_at_last_cutback_ = 0;
  if (!packets_retransmitted) {
    return;
  }
  cubic_.Reset();
  hybrid_slow_start_.Restart();
  slowstart_threshold_ = congestion_window_ / 2;
  congestion_window_ = min_congestion_window_;
}

CongestionControlType TcpCubicSender::GetCongestionControlType() const {
  return reno_ ? kReno : kCubic;
}

}  // namespace net