// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/cubic_bytes.h"

#include <stdint.h>
#include <algorithm>
#include <cmath>

#include "base/basictypes.h"
#include "base/logging.h"
#include "net/quic/quic_protocol.h"

using std::max;

namespace net {

namespace {

// Constants based on TCP defaults.
// The following constants are in 2^10 fractions of a second instead of ms to
// allow a right shift by 10 to divide.
const int kCubeScale = 40;  // 1024*1024^3 (first 1024 is from 0.100^3)
                            // where 0.100 is 100 ms which is the scaling
                            // round trip time.
const int kCubeCongestionWindowScale = 410;
// The cube factor for packets in bytes.
const uint64 kCubeFactor = (UINT64_C(1) << kCubeScale) /
                           kCubeCongestionWindowScale / kDefaultTCPMSS;
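// Note: kCubeCongestionWindowScale / 1024 is approximately 0.4, the
// aggressiveness constant C from the CUBIC paper, so multiplying a byte
// difference by kCubeFactor and taking the cube root (see
// CongestionWindowAfterAck) yields the time-to-origin K in 2^10 fractions of
// a second.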

const uint32 kDefaultNumConnections = 2;
const float kBeta = 0.7f;  // Default Cubic backoff factor.
// Additional backoff factor when loss occurs in the concave part of the Cubic
// curve. This additional backoff factor is expected to give up bandwidth to
// new concurrent flows and speed up convergence.
const float kBetaLastMax = 0.85f;

}  // namespace

CubicBytes::CubicBytes(const QuicClock* clock)
    : clock_(clock),
      num_connections_(kDefaultNumConnections),
      epoch_(QuicTime::Zero()),
      last_update_time_(QuicTime::Zero()) {
  Reset();
}

void CubicBytes::SetNumConnections(int num_connections) {
  num_connections_ = num_connections;
}

float CubicBytes::Alpha() const {
  // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that
  // beta here is a cwnd multiplier, and is equal to 1-beta from the paper.
  // We derive the equivalent alpha for an N-connection emulation as:
  const float beta = Beta();
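  // (With the default 2-connection emulation, Beta() is 0.85, so this works
  // out to 3 * 4 * 0.15 / 1.85, roughly 0.97 MSS of growth per congestion
  // window of acked bytes.)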
  return 3 * num_connections_ * num_connections_ * (1 - beta) / (1 + beta);
}

float CubicBytes::Beta() const {
  // kNConnectionBeta is the backoff factor after loss for our N-connection
  // emulation, which emulates the effective backoff of an ensemble of N
  // TCP-Reno connections on a single loss event. The effective multiplier is
  // computed as:
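  // (With kDefaultNumConnections == 2, this works out to (2 - 1 + 0.7) / 2,
  // i.e. 0.85.)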
  return (num_connections_ - 1 + kBeta) / num_connections_;
}

void CubicBytes::Reset() {
  epoch_ = QuicTime::Zero();             // Reset time.
  last_update_time_ = QuicTime::Zero();  // Reset time.
  last_congestion_window_ = 0;
  last_max_congestion_window_ = 0;
  acked_bytes_count_ = 0;
  estimated_tcp_congestion_window_ = 0;
  origin_point_congestion_window_ = 0;
  time_to_origin_point_ = 0;
  last_target_congestion_window_ = 0;
}

void CubicBytes::OnApplicationLimited() {
  // When the sender is not using the available congestion window, the window
  // does not grow. But to be RTT-independent, Cubic assumes that the sender
  // has been using the entire window during the time since the beginning of
  // the current "epoch" (the end of the last loss recovery period). Since
  // application-limited periods break this assumption, we reset the epoch when
  // in such a period. This reset effectively freezes congestion window growth
  // through application-limited periods and allows Cubic growth to continue
  // when the entire window is being used.
  epoch_ = QuicTime::Zero();
}

QuicByteCount CubicBytes::CongestionWindowAfterPacketLoss(
    QuicByteCount current_congestion_window) {
  if (current_congestion_window < last_max_congestion_window_) {
    // We never reached the old max, so assume we are competing with another
    // flow. Use our extra backoff factor to allow the other flow to go up.
    last_max_congestion_window_ =
        static_cast<int>(kBetaLastMax * current_congestion_window);
  } else {
    last_max_congestion_window_ = current_congestion_window;
  }
  epoch_ = QuicTime::Zero();  // Reset time.
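  // With the default 2-connection emulation, Beta() is 0.85, so a loss event
  // shrinks the window by 15% (vs. 30% for a standalone Cubic flow's kBeta).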
  return static_cast<int>(current_congestion_window * Beta());
}

QuicByteCount CubicBytes::CongestionWindowAfterAck(
    QuicByteCount acked_bytes,
    QuicByteCount current_congestion_window,
    QuicTime::Delta delay_min) {
  acked_bytes_count_ += acked_bytes;
  QuicTime current_time = clock_->ApproximateNow();

  // Cubic is "independent" of RTT; the update is limited by the time elapsed.
  if (last_congestion_window_ == current_congestion_window &&
      (current_time.Subtract(last_update_time_) <= MaxCubicTimeInterval())) {
    return max(last_target_congestion_window_,
               estimated_tcp_congestion_window_);
  }
  last_congestion_window_ = current_congestion_window;
  last_update_time_ = current_time;

  if (!epoch_.IsInitialized()) {
    // First ACK after a loss event.
    DVLOG(1) << "Start of epoch";
    epoch_ = current_time;             // Start of epoch.
    acked_bytes_count_ = acked_bytes;  // Reset count.
    // Reset estimated_tcp_congestion_window_ to be in sync with cubic.
    estimated_tcp_congestion_window_ = current_congestion_window;
    if (last_max_congestion_window_ <= current_congestion_window) {
      time_to_origin_point_ = 0;
      origin_point_congestion_window_ = current_congestion_window;
    } else {
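      // This is K = cbrt((W_max - cwnd) / C) from the CUBIC paper, expressed
      // in 2^10 fractions of a second to match elapsed_time below.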
      time_to_origin_point_ =
          static_cast<uint32>(cbrt(kCubeFactor * (last_max_congestion_window_ -
                                                  current_congestion_window)));
      origin_point_congestion_window_ = last_max_congestion_window_;
    }
  }
  // Change the time unit from microseconds to 2^10 fractions per second. Take
  // the round trip time into account. This is done to allow us to use shift
  // as a divide operator.
  int64 elapsed_time =
      (current_time.Add(delay_min).Subtract(epoch_).ToMicroseconds() << 10) /
      kNumMicrosPerSecond;

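  // Evaluate the cubic W_max - C * (K - t)^3: offset is (K - t) in 2^10
  // fractions of a second, and (410 * offset^3) >> 40 recovers
  // 0.4 * (K - t, in seconds)^3 in packets, which kDefaultTCPMSS converts to
  // bytes.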
  int64 offset = time_to_origin_point_ - elapsed_time;
  QuicByteCount delta_congestion_window =
      ((kCubeCongestionWindowScale * offset * offset * offset) >> kCubeScale) *
      kDefaultTCPMSS;

  QuicByteCount target_congestion_window =
      origin_point_congestion_window_ - delta_congestion_window;

  DCHECK_LT(0u, estimated_tcp_congestion_window_);
  // Increase the window by Alpha * 1 MSS of bytes every time we ack an
  // estimated tcp window of bytes.
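  // (Roughly 0.97 MSS per congestion window of acked bytes with the default
  // 2-connection emulation; see Alpha().)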
  estimated_tcp_congestion_window_ += acked_bytes_count_ *
                                      (Alpha() * kDefaultTCPMSS) /
                                      estimated_tcp_congestion_window_;
  acked_bytes_count_ = 0;

  // We have a new cubic congestion window.
  last_target_congestion_window_ = target_congestion_window;

  // Compute target congestion_window based on cubic target and estimated TCP
  // congestion_window; use the highest (fastest).
  if (target_congestion_window < estimated_tcp_congestion_window_) {
    target_congestion_window = estimated_tcp_congestion_window_;
  }

  DVLOG(1) << "Final target congestion_window: " << target_congestion_window;
  return target_congestion_window;
}

}  // namespace net