// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/tcp_cubic_sender.h"

#include <algorithm>

#include "base/metrics/histogram.h"
#include "net/quic/congestion_control/prr_sender.h"
#include "net/quic/congestion_control/rtt_stats.h"
#include "net/quic/crypto/crypto_protocol.h"

using std::max;
using std::min;

namespace net {

namespace {
// Constants based on TCP defaults.
// The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a
// fast retransmission. The cwnd after a timeout is still 1.
const QuicPacketCount kMinimumCongestionWindow = 2;
const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS;
const int kMaxBurstLength = 3;
const float kRenoBeta = 0.7f;             // Reno backoff factor.
const uint32 kDefaultNumConnections = 2;  // N-connection emulation.
}  // namespace

TcpCubicSender::TcpCubicSender(const QuicClock* clock,
                               const RttStats* rtt_stats,
                               bool reno,
                               QuicPacketCount initial_tcp_congestion_window,
                               QuicPacketCount max_tcp_congestion_window,
                               QuicConnectionStats* stats)
    : hybrid_slow_start_(clock),
      cubic_(clock, stats),
      rtt_stats_(rtt_stats),
      stats_(stats),
      reno_(reno),
      num_connections_(kDefaultNumConnections),
      congestion_window_count_(0),
      largest_sent_sequence_number_(0),
      largest_acked_sequence_number_(0),
      largest_sent_at_last_cutback_(0),
      congestion_window_(initial_tcp_congestion_window),
      previous_congestion_window_(0),
      slowstart_threshold_(max_tcp_congestion_window),
      previous_slowstart_threshold_(0),
      last_cutback_exited_slowstart_(false),
      max_tcp_congestion_window_(max_tcp_congestion_window),
      clock_(clock) {}

TcpCubicSender::~TcpCubicSender() {
  UMA_HISTOGRAM_COUNTS("Net.QuicSession.FinalTcpCwnd", congestion_window_);
}

void TcpCubicSender::SetFromConfig(const QuicConfig& config,
                                   bool is_server,
                                   bool using_pacing) {
  if (is_server) {
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW10)) {
      // Initial window experiment.
      congestion_window_ = 10;
    }
    if (using_pacing) {
      // Disable the ack train mode in hystart when pacing is enabled, since
      // it may be falsely triggered.
      hybrid_slow_start_.set_ack_train_detection(false);
    }
  }
}

bool TcpCubicSender::ResumeConnectionState(
    const CachedNetworkParameters& cached_network_params) {
  // Only use the previous bandwidth estimate for resumption if it is less
  // than an hour old.
  int64 seconds_since_estimate =
      clock_->WallNow().ToUNIXSeconds() - cached_network_params.timestamp();
  if (seconds_since_estimate > kNumSecondsPerHour) {
    return false;
  }

  QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(
      cached_network_params.bandwidth_estimate_bytes_per_second());
  QuicTime::Delta rtt_ms =
      QuicTime::Delta::FromMilliseconds(cached_network_params.min_rtt_ms());

  // Make sure CWND is in the appropriate range (in case of bad data).
  QuicPacketCount new_congestion_window =
      bandwidth.ToBytesPerPeriod(rtt_ms) / kMaxPacketSize;
  congestion_window_ = max(min(new_congestion_window, kMaxTcpCongestionWindow),
                           kMinCongestionWindowForBandwidthResumption);

  // TODO(rjshade): Set appropriate CWND when previous connection was in slow
  // start at time of estimate.
  return true;
}
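
// Worked example for ResumeConnectionState() (illustrative numbers, not from
// the source): a cached estimate of 625,000 bytes/s with a 100 ms min RTT
// yields 62,500 bytes per RTT, i.e. ~43 packets assuming a kMaxPacketSize of
// roughly 1.4 KB, which is then clamped between
// kMinCongestionWindowForBandwidthResumption and kMaxTcpCongestionWindow.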

void TcpCubicSender::SetNumEmulatedConnections(int num_connections) {
  num_connections_ = max(1, num_connections);
  cubic_.SetNumConnections(num_connections_);
}

float TcpCubicSender::RenoBeta() const {
  // kNConnectionBeta is the backoff factor after loss for our N-connection
  // emulation, which emulates the effective backoff of an ensemble of N
  // TCP-Reno connections on a single loss event. The effective multiplier is
  // computed as (N - 1 + kRenoBeta) / N:
  return (num_connections_ - 1 + kRenoBeta) / num_connections_;
}
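
// Example: with the default of 2 emulated connections and kRenoBeta = 0.7,
// the multiplier is (2 - 1 + 0.7) / 2 = 0.85, a gentler backoff than the 0.7
// a single Reno flow would apply.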

void TcpCubicSender::OnCongestionEvent(
    bool rtt_updated,
    QuicByteCount bytes_in_flight,
    const CongestionVector& acked_packets,
    const CongestionVector& lost_packets) {
  if (rtt_updated && InSlowStart() &&
      hybrid_slow_start_.ShouldExitSlowStart(rtt_stats_->latest_rtt(),
                                             rtt_stats_->min_rtt(),
                                             congestion_window_)) {
    slowstart_threshold_ = congestion_window_;
  }
  for (CongestionVector::const_iterator it = lost_packets.begin();
       it != lost_packets.end(); ++it) {
    OnPacketLost(it->first, bytes_in_flight);
  }
  for (CongestionVector::const_iterator it = acked_packets.begin();
       it != acked_packets.end(); ++it) {
    OnPacketAcked(it->first, it->second.bytes_sent, bytes_in_flight);
  }
}
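
// Note on ordering in OnCongestionEvent(): losses are processed before acks,
// so by the time the acked packets are handled, the recovery state and the
// reduced congestion window already reflect this event's losses.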

void TcpCubicSender::OnPacketAcked(
    QuicPacketSequenceNumber acked_sequence_number,
    QuicByteCount acked_bytes,
    QuicByteCount bytes_in_flight) {
  largest_acked_sequence_number_ = max(acked_sequence_number,
                                       largest_acked_sequence_number_);
  // As soon as a packet is acked, ensure we're no longer in RTO mode.
  previous_congestion_window_ = 0;
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketAcked(acked_bytes);
    return;
  }
  MaybeIncreaseCwnd(acked_sequence_number, bytes_in_flight);
  // TODO(ianswett): Should this even be called when not in slow start?
  hybrid_slow_start_.OnPacketAcked(acked_sequence_number, InSlowStart());
}

void TcpCubicSender::OnPacketLost(QuicPacketSequenceNumber sequence_number,
                                  QuicByteCount bytes_in_flight) {
  // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
  // already sent should be treated as a single loss event, since it's
  // expected.
  if (sequence_number <= largest_sent_at_last_cutback_) {
    if (last_cutback_exited_slowstart_) {
      ++stats_->slowstart_packets_lost;
    }
    DVLOG(1) << "Ignoring loss for largest_missing:" << sequence_number
             << " because it was sent prior to the last CWND cutback.";
    return;
  }
  ++stats_->tcp_loss_events;
  last_cutback_exited_slowstart_ = InSlowStart();
  if (InSlowStart()) {
    ++stats_->slowstart_packets_lost;
  }

  prr_.OnPacketLost(bytes_in_flight);

  if (reno_) {
    congestion_window_ = congestion_window_ * RenoBeta();
  } else {
    congestion_window_ =
        cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
  }
  slowstart_threshold_ = congestion_window_;
  // Enforce TCP's minimum congestion window of 2*MSS.
  if (congestion_window_ < kMinimumCongestionWindow) {
    congestion_window_ = kMinimumCongestionWindow;
  }
  largest_sent_at_last_cutback_ = largest_sent_sequence_number_;
  // Reset the packet count from congestion avoidance mode. We start counting
  // again when we're out of recovery.
  congestion_window_count_ = 0;
  DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
           << " slowstart threshold: " << slowstart_threshold_;
}

bool TcpCubicSender::OnPacketSent(QuicTime /*sent_time*/,
                                  QuicByteCount /*bytes_in_flight*/,
                                  QuicPacketSequenceNumber sequence_number,
                                  QuicByteCount bytes,
                                  HasRetransmittableData is_retransmittable) {
  // Only update bytes_in_flight_ for data packets.
  if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
    return false;
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketSent(bytes);
  }
  DCHECK_LT(largest_sent_sequence_number_, sequence_number);
  largest_sent_sequence_number_ = sequence_number;
  hybrid_slow_start_.OnPacketSent(sequence_number);
  return true;
}

QuicTime::Delta TcpCubicSender::TimeUntilSend(
    QuicTime /* now */,
    QuicByteCount bytes_in_flight,
    HasRetransmittableData has_retransmittable_data) const {
  if (has_retransmittable_data == NO_RETRANSMITTABLE_DATA) {
    // For TCP we can always send an ACK immediately.
    return QuicTime::Delta::Zero();
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    return prr_.TimeUntilSend(GetCongestionWindow(), bytes_in_flight,
                              slowstart_threshold_);
  }
  if (GetCongestionWindow() > bytes_in_flight) {
    return QuicTime::Delta::Zero();
  }
  return QuicTime::Delta::Infinite();
}
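
// Outside recovery, TimeUntilSend() is all-or-nothing: it returns either zero
// (window available) or infinite (blocked), never an intermediate delay.
// Pacing, when enabled, is layered on separately and consults PacingRate()
// below.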

QuicBandwidth TcpCubicSender::PacingRate() const {
  // We pace at twice the rate of the underlying sender's bandwidth estimate
  // during slow start and 1.25x during congestion avoidance to ensure pacing
  // doesn't prevent us from filling the window.
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    srtt = QuicTime::Delta::FromMicroseconds(rtt_stats_->initial_rtt_us());
  }
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
  return bandwidth.Scale(InSlowStart() ? 2 : 1.25);
}
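
// Example for PacingRate(): with a 10-segment window at the conventional
// 1460-byte TCP MSS, GetCongestionWindow() is 14,600 bytes; over a 100 ms
// smoothed RTT that is 146,000 bytes/s, scaled to 292,000 bytes/s in slow
// start or 182,500 bytes/s in congestion avoidance.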

QuicBandwidth TcpCubicSender::BandwidthEstimate() const {
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    // If we haven't measured an rtt, the bandwidth estimate is unknown.
    return QuicBandwidth::Zero();
  }
  return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
}

bool TcpCubicSender::HasReliableBandwidthEstimate() const {
  return !InSlowStart() && !InRecovery() &&
         !rtt_stats_->smoothed_rtt().IsZero();
}

QuicTime::Delta TcpCubicSender::RetransmissionDelay() const {
  if (rtt_stats_->smoothed_rtt().IsZero()) {
    return QuicTime::Delta::Zero();
  }
  return rtt_stats_->smoothed_rtt().Add(
      rtt_stats_->mean_deviation().Multiply(4));
}
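
// RetransmissionDelay() mirrors TCP's retransmission timeout of
// srtt + 4 * rttvar (RFC 6298): e.g. a 100 ms smoothed RTT with a 10 ms mean
// deviation yields a 140 ms delay.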

QuicByteCount TcpCubicSender::GetCongestionWindow() const {
  return congestion_window_ * kMaxSegmentSize;
}

bool TcpCubicSender::InSlowStart() const {
  return congestion_window_ < slowstart_threshold_;
}

QuicByteCount TcpCubicSender::GetSlowStartThreshold() const {
  return slowstart_threshold_ * kMaxSegmentSize;
}

bool TcpCubicSender::IsCwndLimited(QuicByteCount bytes_in_flight) const {
  const QuicByteCount congestion_window_bytes =
      congestion_window_ * kMaxSegmentSize;
  if (bytes_in_flight >= congestion_window_bytes) {
    return true;
  }
  const QuicByteCount max_burst = kMaxBurstLength * kMaxSegmentSize;
  const QuicByteCount available_bytes =
      congestion_window_bytes - bytes_in_flight;
  const bool slow_start_limited =
      InSlowStart() && bytes_in_flight > congestion_window_bytes / 2;
  return slow_start_limited || available_bytes <= max_burst;
}
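
// Example for IsCwndLimited(): with a 20-segment window and 5 segments in
// flight, 15 segments are available (more than the 3-segment kMaxBurstLength
// allowance), and the slow-start half-window test also fails (5 < 10), so
// the function returns false and the window is not grown.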

bool TcpCubicSender::InRecovery() const {
  return largest_acked_sequence_number_ <= largest_sent_at_last_cutback_ &&
         largest_acked_sequence_number_ != 0;
}
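
// Recovery therefore lasts until a packet sent after the last cutback is
// acked; until then, acks feed PRR (see OnPacketAcked) instead of growing
// the congestion window.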

// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but QUIC has a separate ack for each packet.
void TcpCubicSender::MaybeIncreaseCwnd(
    QuicPacketSequenceNumber acked_sequence_number,
    QuicByteCount bytes_in_flight) {
  LOG_IF(DFATAL, InRecovery()) << "Never increase the CWND during recovery.";
  if (!IsCwndLimited(bytes_in_flight)) {
    // We don't update the congestion window unless we are close to using the
    // window we have available.
    return;
  }
  if (InSlowStart()) {
    if (congestion_window_ < max_tcp_congestion_window_) {
      // TCP slow start, exponential growth, increase by one for each ACK.
      ++congestion_window_;
    }
    DVLOG(1) << "Slow start; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
    return;
  }
  if (congestion_window_ >= max_tcp_congestion_window_) {
    return;
  }
  // Congestion avoidance.
  if (reno_) {
    // Classic Reno congestion avoidance.
    // congestion_window_count_ is the number of acks since the last change
    // of congestion_window_.
    ++congestion_window_count_;
    // Divide by num_connections to smoothly increase the CWND at a faster
    // rate than conventional Reno.
    if (congestion_window_count_ * num_connections_ >= congestion_window_) {
      ++congestion_window_;
      congestion_window_count_ = 0;
    }

    DVLOG(1) << "Reno; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_
             << " congestion window count: " << congestion_window_count_;
  } else {
    congestion_window_ = min(max_tcp_congestion_window_,
                             cubic_.CongestionWindowAfterAck(
                                 congestion_window_, rtt_stats_->min_rtt()));
    DVLOG(1) << "Cubic; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
  }
}

void TcpCubicSender::OnRetransmissionTimeout(bool packets_retransmitted) {
  largest_sent_at_last_cutback_ = 0;
  if (!packets_retransmitted) {
    return;
  }
  cubic_.Reset();
  hybrid_slow_start_.Restart();
  // Only reduce ssthresh once over multiple retransmissions.
  if (previous_congestion_window_ != 0) {
    return;
  }
  previous_slowstart_threshold_ = slowstart_threshold_;
  slowstart_threshold_ = congestion_window_ / 2;
  previous_congestion_window_ = congestion_window_;
  congestion_window_ = kMinimumCongestionWindow;
}
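
// Note that previous_congestion_window_ doubles as an "in RTO" flag: it is
// nonzero only between a timeout-driven cut and either the next ack (which
// clears it in OnPacketAcked) or an explicit revert below.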

void TcpCubicSender::RevertRetransmissionTimeout() {
  if (previous_congestion_window_ == 0) {
    LOG(DFATAL) << "No previous congestion window to revert to.";
    return;
  }
  congestion_window_ = previous_congestion_window_;
  slowstart_threshold_ = previous_slowstart_threshold_;
  previous_congestion_window_ = 0;
}

CongestionControlType TcpCubicSender::GetCongestionControlType() const {
  return reno_ ? kReno : kCubic;
}

}  // namespace net