// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/cubic.h"

#include <algorithm>
#include <cmath>

#include "base/basictypes.h"
#include "base/logging.h"
#include "net/quic/quic_flags.h"
#include "net/quic/quic_protocol.h"
#include "net/quic/quic_time.h"

using std::max;

namespace net {

namespace {

// Constants based on TCP defaults.
// The following constants are in 2^10 fractions of a second instead of ms to
// allow a 10 shift right to divide.
const int kCubeScale = 40;  // 1024*1024^3 (first 1024 is from 0.100^3)
                            // where 0.100 is 100 ms which is the scaling
                            // round trip time.
const int kCubeCongestionWindowScale = 410;
const uint64 kCubeFactor =
    (UINT64_C(1) << kCubeScale) / kCubeCongestionWindowScale;

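// Worked example of the scaling above: kCubeFactor = 2^40 / 410 ~= 2.7e9.
// Since elapsed time below is kept in 2^10 fractions of a second,
// (410 * offset^3) >> kCubeScale is ~0.4 * t^3 with t in seconds, i.e.
// kCubeCongestionWindowScale is the CUBIC paper's aggressiveness constant
// C = 0.4 expressed in 1/1024 units (0.4 * 1024 ~= 410).
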
const uint32 kDefaultNumConnections = 2;
const float kBeta = 0.7f;  // Default Cubic backoff factor.
// Additional backoff factor when loss occurs in the concave part of the Cubic
// curve. This additional backoff factor is expected to give up bandwidth to
// new concurrent flows and speed up convergence.
const float kBetaLastMax = 0.85f;

}  // namespace

Cubic::Cubic(const QuicClock* clock)
    : clock_(clock),
      num_connections_(kDefaultNumConnections),
      epoch_(QuicTime::Zero()),
      last_update_time_(QuicTime::Zero()) {
  Reset();
}

void Cubic::SetNumConnections(int num_connections) {
  num_connections_ = num_connections;
}

float Cubic::Alpha() const {
  // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that
  // beta here is a cwnd multiplier, and is equal to 1-beta from the paper.
  // We derive the equivalent alpha for an N-connection emulation as:
  const float beta = Beta();
  return 3 * num_connections_ * num_connections_ * (1 - beta) / (1 + beta);
}

float Cubic::Beta() const {
  // kNConnectionBeta is the backoff factor after loss for our N-connection
  // emulation, which emulates the effective backoff of an ensemble of N
  // TCP-Reno connections on a single loss event. The effective multiplier is
  // computed as:
  return (num_connections_ - 1 + kBeta) / num_connections_;
}

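// Worked example with the default num_connections_ = 2 and kBeta = 0.7f:
// Beta() = (2 - 1 + 0.7) / 2 = 0.85 and
// Alpha() = 3 * 2 * 2 * (1 - 0.85) / (1 + 0.85) ~= 0.97.
// (Illustrative arithmetic; the values follow from the constants above.)
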
void Cubic::Reset() {
  epoch_ = QuicTime::Zero();             // Reset time.
  last_update_time_ = QuicTime::Zero();  // Reset time.
  last_congestion_window_ = 0;
  last_max_congestion_window_ = 0;
  acked_packets_count_ = 0;
  estimated_tcp_congestion_window_ = 0;
  origin_point_congestion_window_ = 0;
  time_to_origin_point_ = 0;
  last_target_congestion_window_ = 0;
}

void Cubic::OnApplicationLimited() {
  // When sender is not using the available congestion window, the window does
  // not grow. But to be RTT-independent, Cubic assumes that the sender has been
  // using the entire window during the time since the beginning of the current
  // "epoch" (the end of the last loss recovery period). Since
  // application-limited periods break this assumption, we reset the epoch when
  // in such a period. This reset effectively freezes congestion window growth
  // through application-limited periods and allows Cubic growth to continue
  // when the entire window is being used.
  epoch_ = QuicTime::Zero();
}

QuicPacketCount Cubic::CongestionWindowAfterPacketLoss(
    QuicPacketCount current_congestion_window) {
  if (current_congestion_window < last_max_congestion_window_) {
    // We never reached the old max, so assume we are competing with another
    // flow. Use our extra back off factor to allow the other flow to go up.
    last_max_congestion_window_ =
        static_cast<int>(kBetaLastMax * current_congestion_window);
  } else {
    last_max_congestion_window_ = current_congestion_window;
  }
  epoch_ = QuicTime::Zero();  // Reset time.
  return static_cast<int>(current_congestion_window * Beta());
}

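// Worked example with the defaults above (illustrative only): a loss at
// current_congestion_window = 100 returns 100 * Beta() = 85. If
// last_max_congestion_window_ was 200 (the old max was never reached again),
// it is lowered to kBetaLastMax * 100 = 85 rather than 100, giving up
// bandwidth to the competing flow.
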
QuicPacketCount Cubic::CongestionWindowAfterAck(
    QuicPacketCount current_congestion_window,
    QuicTime::Delta delay_min) {
  acked_packets_count_ += 1;  // Packets acked.
  QuicTime current_time = clock_->ApproximateNow();

  // Cubic is "independent" of RTT, the update is limited by the time elapsed.
  if (last_congestion_window_ == current_congestion_window &&
      (current_time.Subtract(last_update_time_) <= MaxCubicTimeInterval())) {
    return max(last_target_congestion_window_,
               estimated_tcp_congestion_window_);
  }
  last_congestion_window_ = current_congestion_window;
  last_update_time_ = current_time;

  if (!epoch_.IsInitialized()) {
    // First ACK after a loss event.
    DVLOG(1) << "Start of epoch";
    epoch_ = current_time;     // Start of epoch.
    acked_packets_count_ = 1;  // Reset count.
    // Reset estimated_tcp_congestion_window_ to be in sync with cubic.
    estimated_tcp_congestion_window_ = current_congestion_window;
    if (last_max_congestion_window_ <= current_congestion_window) {
      time_to_origin_point_ = 0;
      origin_point_congestion_window_ = current_congestion_window;
    } else {
      time_to_origin_point_ = static_cast<uint32>(
          cbrt(kCubeFactor *
               (last_max_congestion_window_ - current_congestion_window)));
      origin_point_congestion_window_ = last_max_congestion_window_;
    }
  }
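  // Worked example of the origin-point math above (illustrative numbers): if
  // a loss at a 200-packet window shrank it to 170 (Beta() = 0.85), then
  // last_max_congestion_window_ = 200 and
  // time_to_origin_point_ = cbrt(kCubeFactor * 30) ~= 4317 in 2^10 fractions
  // of a second, i.e. the cubic curve regrows to 200 packets roughly 4.2
  // seconds after the epoch.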
  // Change the time unit from microseconds to 2^10 fractions per second. Take
  // the round trip time in account. This is done to allow us to use shift as a
  // divide operator.
  int64 elapsed_time =
      (current_time.Add(delay_min).Subtract(epoch_).ToMicroseconds() << 10) /
      kNumMicrosPerSecond;

  int64 offset = time_to_origin_point_ - elapsed_time;
  QuicPacketCount delta_congestion_window =
      (kCubeCongestionWindowScale * offset * offset * offset) >> kCubeScale;

  QuicPacketCount target_congestion_window =
      origin_point_congestion_window_ - delta_congestion_window;

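  // The lines above evaluate the CUBIC curve W(t) = C * (t - K)^3 + W_max in
  // fixed point: offset is (K - t) in 2^10 fractions of a second, so
  // (kCubeCongestionWindowScale * offset^3) >> kCubeScale ~= 0.4 * (K - t)^3
  // packets, which puts the target below origin_point_congestion_window_
  // before the origin and above it afterwards (offset^3 goes negative once
  // t > K).
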
  DCHECK_LT(0u, estimated_tcp_congestion_window_);
  // With dynamic beta/alpha based on number of active streams, it is possible
  // for the required_ack_count to become much lower than acked_packets_count_
  // suddenly, leading to more than one iteration through the following loop.
  while (true) {
    // Update estimated TCP congestion_window.
    QuicPacketCount required_ack_count = static_cast<QuicPacketCount>(
        estimated_tcp_congestion_window_ / Alpha());
    if (acked_packets_count_ < required_ack_count) {
      break;
    }
    acked_packets_count_ -= required_ack_count;
    estimated_tcp_congestion_window_++;
  }
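  // The loop above grows the Reno-style estimate by one packet per
  // estimated_tcp_congestion_window_ / Alpha() ACKs, i.e. about Alpha()
  // packets per round trip. Worked example with the defaults: at a 100-packet
  // window and Alpha() ~= 0.97, one packet is added after ~103 ACKs.
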
  // We have a new cubic congestion window.
  last_target_congestion_window_ = target_congestion_window;

  // Compute target congestion_window based on cubic target and estimated TCP
  // congestion_window, use highest (fastest).
  if (target_congestion_window < estimated_tcp_congestion_window_) {
    target_congestion_window = estimated_tcp_congestion_window_;
  }

  DVLOG(1) << "Final target congestion_window: " << target_congestion_window;
  return target_congestion_window;
}

}  // namespace net