net/quic/congestion_control/tcp_cubic_bytes_sender_test.cc

// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/tcp_cubic_bytes_sender.h"

#include <algorithm>

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "net/quic/congestion_control/rtt_stats.h"
#include "net/quic/crypto/crypto_protocol.h"
#include "net/quic/quic_protocol.h"
#include "net/quic/quic_utils.h"
#include "net/quic/test_tools/mock_clock.h"
#include "net/quic/test_tools/quic_config_peer.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace net {
namespace test {

// TODO(ianswett): A number of these tests were written with the assumption of
// an initial CWND of 10. They have carefully calculated values which should be
// updated to be based on kInitialCongestionWindowInsecure.
const uint32 kInitialCongestionWindowPackets = 10;
const uint32 kDefaultWindowTCP =
    kInitialCongestionWindowPackets * kDefaultTCPMSS;
const float kRenoBeta = 0.7f;  // Reno backoff factor.
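
// Test peer that exposes the sender's protected state (hybrid slow start,
// RTT stats, connection stats, and the Reno beta) so the tests below can
// inspect it directly.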
class TcpCubicBytesSenderPeer : public TcpCubicBytesSender {
 public:
  TcpCubicBytesSenderPeer(const QuicClock* clock, bool reno)
      : TcpCubicBytesSender(clock,
                            &rtt_stats_,
                            reno,
                            kInitialCongestionWindowPackets,
                            kMaxTcpCongestionWindow,
                            &stats_) {}

  const HybridSlowStart& hybrid_slow_start() const {
    return hybrid_slow_start_;
  }

  float GetRenoBeta() const { return RenoBeta(); }

  RttStats rtt_stats_;
  QuicConnectionStats stats_;
};

class TcpCubicBytesSenderTest : public ::testing::Test {
 protected:
  TcpCubicBytesSenderTest()
      : one_ms_(QuicTime::Delta::FromMilliseconds(1)),
        sender_(new TcpCubicBytesSenderPeer(&clock_, true)),
        sequence_number_(1),
        acked_sequence_number_(0),
        bytes_in_flight_(0) {
    standard_packet_.bytes_sent = kDefaultTCPMSS;
  }
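
  // Sends packets until TimeUntilSend reports that the congestion window is
  // full, and returns the number of packets sent.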
  int SendAvailableSendWindow() {
    // Send as long as TimeUntilSend returns Zero.
    int packets_sent = 0;
    bool can_send = sender_->TimeUntilSend(clock_.Now(), bytes_in_flight_,
                                           HAS_RETRANSMITTABLE_DATA).IsZero();
    while (can_send) {
      sender_->OnPacketSent(clock_.Now(), bytes_in_flight_, sequence_number_++,
                            kDefaultTCPMSS, HAS_RETRANSMITTABLE_DATA);
      ++packets_sent;
      bytes_in_flight_ += kDefaultTCPMSS;
      can_send = sender_->TimeUntilSend(clock_.Now(), bytes_in_flight_,
                                        HAS_RETRANSMITTABLE_DATA).IsZero();
    }
    return packets_sent;
  }

  // Normally TCP acks every other segment.
  void AckNPackets(int n) {
    sender_->rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(60),
                                  QuicTime::Delta::Zero(), clock_.Now());
    SendAlgorithmInterface::CongestionVector acked_packets;
    SendAlgorithmInterface::CongestionVector lost_packets;
    for (int i = 0; i < n; ++i) {
      ++acked_sequence_number_;
      acked_packets.push_back(
          std::make_pair(acked_sequence_number_, standard_packet_));
    }
    sender_->OnCongestionEvent(true, bytes_in_flight_, acked_packets,
                               lost_packets);
    bytes_in_flight_ -= n * kDefaultTCPMSS;
    clock_.AdvanceTime(one_ms_);
  }
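
  // Marks the next n packets as lost and reports them to the sender in a
  // single congestion event, then removes them from bytes_in_flight_.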
  void LoseNPackets(int n) {
    SendAlgorithmInterface::CongestionVector acked_packets;
    SendAlgorithmInterface::CongestionVector lost_packets;
    for (int i = 0; i < n; ++i) {
      ++acked_sequence_number_;
      lost_packets.push_back(
          std::make_pair(acked_sequence_number_, standard_packet_));
    }
    sender_->OnCongestionEvent(false, bytes_in_flight_, acked_packets,
                               lost_packets);
    bytes_in_flight_ -= n * kDefaultTCPMSS;
  }

  // Does not increment acked_sequence_number_.
  void LosePacket(QuicPacketSequenceNumber sequence_number) {
    SendAlgorithmInterface::CongestionVector acked_packets;
    SendAlgorithmInterface::CongestionVector lost_packets;
    lost_packets.push_back(std::make_pair(sequence_number, standard_packet_));
    sender_->OnCongestionEvent(false, bytes_in_flight_, acked_packets,
                               lost_packets);
    bytes_in_flight_ -= kDefaultTCPMSS;
  }

  const QuicTime::Delta one_ms_;
  MockClock clock_;
  scoped_ptr<TcpCubicBytesSenderPeer> sender_;
  QuicPacketSequenceNumber sequence_number_;
  QuicPacketSequenceNumber acked_sequence_number_;
  QuicByteCount bytes_in_flight_;
  TransmissionInfo standard_packet_;
};
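
// The tests below drive the sender through slow start, loss recovery, and
// congestion avoidance, comparing the congestion window and slow start
// threshold against hand-computed values.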
TEST_F(TcpCubicBytesSenderTest, SimpleSender) {
  // At startup make sure we are at the default.
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
  // At startup make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  // Make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  // And that window is un-affected.
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());

  // Fill the send window with data, then verify that we can't send.
  SendAvailableSendWindow();
  EXPECT_FALSE(sender_->TimeUntilSend(clock_.Now(),
                                      sender_->GetCongestionWindow(),
                                      HAS_RETRANSMITTABLE_DATA).IsZero());
}

TEST_F(TcpCubicBytesSenderTest, ApplicationLimitedSlowStart) {
  // Send exactly 10 packets and ensure the CWND ends at 14 packets.
  const int kNumberOfAcks = 5;
  // At startup make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  // Make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());

  SendAvailableSendWindow();
  for (int i = 0; i < kNumberOfAcks; ++i) {
    AckNPackets(2);
  }
  QuicByteCount bytes_to_send = sender_->GetCongestionWindow();
  // It's expected that 2 acks will arrive while bytes_in_flight is greater
  // than half the CWND.
  EXPECT_EQ(kDefaultWindowTCP + kDefaultTCPMSS * 2 * 2, bytes_to_send);
}

TEST_F(TcpCubicBytesSenderTest, ExponentialSlowStart) {
  const int kNumberOfAcks = 20;
  // At startup make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  EXPECT_FALSE(sender_->HasReliableBandwidthEstimate());
  EXPECT_EQ(QuicBandwidth::Zero(), sender_->BandwidthEstimate());
  // Make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());

  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  const QuicByteCount cwnd = sender_->GetCongestionWindow();
  EXPECT_EQ(kDefaultWindowTCP + kDefaultTCPMSS * 2 * kNumberOfAcks, cwnd);
  EXPECT_FALSE(sender_->HasReliableBandwidthEstimate());
  EXPECT_EQ(QuicBandwidth::FromBytesAndTimeDelta(
                cwnd, sender_->rtt_stats_.smoothed_rtt()),
            sender_->BandwidthEstimate());
}

TEST_F(TcpCubicBytesSenderTest, SlowStartAckTrain) {
  sender_->SetNumEmulatedConnections(1);

  // Make sure that we fall out of slow start when we encounter an ack train
  // longer than half the RTT, in this test case 30ms, which is more than 30
  // calls to AckNPackets(2) in one round.
  // Since we start at 10 packets, the first round will be 5 acks, the second
  // round 10, etc. Hence we should pass 30 at 65 = 5 + 10 + 20 + 30.
  const int kNumberOfAcks = 65;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // We should now have fallen out of slow start.
  // Testing the Reno phase: we should need 140 (65 * 2 + 10) acked packets
  // before increasing the window by one MSS.
  for (int i = 0; i < 69; ++i) {
    SendAvailableSendWindow();
    AckNPackets(2);
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }
  SendAvailableSendWindow();
  AckNPackets(2);
  QuicByteCount expected_ss_thresh = expected_send_window;
  expected_send_window += kDefaultTCPMSS;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  EXPECT_EQ(expected_ss_thresh, sender_->GetSlowStartThreshold());

  // Now RTO and ensure slow start gets reset.
  EXPECT_TRUE(sender_->hybrid_slow_start().started());
  sender_->OnRetransmissionTimeout(true);
  EXPECT_FALSE(sender_->hybrid_slow_start().started());
  EXPECT_EQ(2 * kDefaultTCPMSS, sender_->GetCongestionWindow());
  EXPECT_EQ(expected_send_window / 2, sender_->GetSlowStartThreshold());
}

TEST_F(TcpCubicBytesSenderTest, SlowStartPacketLoss) {
  sender_->SetNumEmulatedConnections(1);
  const int kNumberOfAcks = 10;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Lose a packet to exit slow start.
  LoseNPackets(1);
  size_t packets_in_recovery_window = expected_send_window / kDefaultTCPMSS;

  // We should now have fallen out of slow start with a reduced window.
  expected_send_window *= kRenoBeta;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Recovery phase. We need to ack every packet in the recovery window before
  // we exit recovery.
  size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
  DVLOG(1) << "number_packets: " << number_of_packets_in_window;
  AckNPackets(packets_in_recovery_window);
  SendAvailableSendWindow();
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // We need to ack an entire window before we increase CWND by 1.
  AckNPackets(number_of_packets_in_window - 2);
  SendAvailableSendWindow();
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Next ack should increase cwnd by 1.
  AckNPackets(1);
  expected_send_window += kDefaultTCPMSS;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Now RTO and ensure slow start gets reset.
  EXPECT_TRUE(sender_->hybrid_slow_start().started());
  sender_->OnRetransmissionTimeout(true);
  EXPECT_FALSE(sender_->hybrid_slow_start().started());
}
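
// The next tests exercise proportional rate reduction (PRR, RFC 6937), which
// limits how many packets may be sent per ack while in recovery.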
TEST_F(TcpCubicBytesSenderTest, NoPRRWhenLessThanOnePacketInFlight) {
  SendAvailableSendWindow();
  LoseNPackets(kInitialCongestionWindowPackets - 1);
  AckNPackets(1);
  // PRR will allow 2 packets for every ack during recovery.
  EXPECT_EQ(2, SendAvailableSendWindow());
  // Simulate abandoning all packets by supplying a bytes_in_flight of 0.
  // PRR should now allow a packet to be sent, even though PRR's state
  // variables believe it has sent enough packets.
  EXPECT_EQ(QuicTime::Delta::Zero(),
            sender_->TimeUntilSend(clock_.Now(), 0, HAS_RETRANSMITTABLE_DATA));
}

TEST_F(TcpCubicBytesSenderTest, SlowStartPacketLossPRR) {
  sender_->SetNumEmulatedConnections(1);
  // Test based on the first example in RFC6937.
  // Ack 10 packets in 5 acks to raise the CWND to 20, as in the example.
  const int kNumberOfAcks = 5;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  LoseNPackets(1);

  // We should now have fallen out of slow start with a reduced window.
  size_t send_window_before_loss = expected_send_window;
  expected_send_window *= kRenoBeta;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Testing TCP proportional rate reduction.
  // We should send packets paced over the received acks for the remaining
  // outstanding packets. The number of packets before we exit recovery is the
  // original CWND minus the packet that has been lost and the one which
  // triggered the loss.
  size_t remaining_packets_in_recovery =
      send_window_before_loss / kDefaultTCPMSS - 2;

  for (size_t i = 0; i < remaining_packets_in_recovery; ++i) {
    AckNPackets(1);
    SendAvailableSendWindow();
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }

  // We need to ack another window before we increase CWND by 1.
  size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
  for (size_t i = 0; i < number_of_packets_in_window; ++i) {
    AckNPackets(1);
    EXPECT_EQ(1, SendAvailableSendWindow());
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }

  AckNPackets(1);
  expected_send_window += kDefaultTCPMSS;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, SlowStartBurstPacketLossPRR) {
  sender_->SetNumEmulatedConnections(1);
  // Test based on the second example in RFC6937, though we also implement
  // forward acknowledgements, so the first two incoming acks will trigger
  // PRR immediately.
  // Ack 20 packets in 10 acks to raise the CWND to 30.
  const int kNumberOfAcks = 10;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Lose one more than the congestion window reduction, so that after loss,
  // bytes_in_flight is less than the congestion window.
  size_t send_window_after_loss = kRenoBeta * expected_send_window;
  size_t num_packets_to_lose =
      (expected_send_window - send_window_after_loss) / kDefaultTCPMSS + 1;
  LoseNPackets(num_packets_to_lose);
  // Immediately after the loss, ensure at least one packet can be sent.
  // Losses without subsequent acks can occur with timer based loss detection.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), bytes_in_flight_,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  AckNPackets(1);

  // We should now have fallen out of slow start with a reduced window.
  expected_send_window *= kRenoBeta;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Only 2 packets should be allowed to be sent, per PRR-SSRB.
  EXPECT_EQ(2, SendAvailableSendWindow());

  // Ack the next packet, which triggers another loss.
  LoseNPackets(1);
  AckNPackets(1);

  // Send 2 packets to simulate PRR-SSRB.
  EXPECT_EQ(2, SendAvailableSendWindow());

  // Ack the next packet, which triggers another loss.
  LoseNPackets(1);
  AckNPackets(1);

  // Send 2 packets to simulate PRR-SSRB.
  EXPECT_EQ(2, SendAvailableSendWindow());

  // Exit recovery and return to sending at the new rate.
  for (int i = 0; i < kNumberOfAcks; ++i) {
    AckNPackets(1);
    EXPECT_EQ(1, SendAvailableSendWindow());
  }
}

TEST_F(TcpCubicBytesSenderTest, RTOCongestionWindow) {
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
  // Expect the window to decrease to the minimum once the RTO fires and slow
  // start threshold to be set to 1/2 of the CWND.
  sender_->OnRetransmissionTimeout(true);
  EXPECT_EQ(2 * kDefaultTCPMSS, sender_->GetCongestionWindow());
  EXPECT_EQ(5u * kDefaultTCPMSS, sender_->GetSlowStartThreshold());
}

TEST_F(TcpCubicBytesSenderTest, RTOCongestionWindowNoRetransmission) {
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());

  // Expect the window to remain unchanged if the RTO fires but no packets are
  // retransmitted.
  sender_->OnRetransmissionTimeout(false);
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, RetransmissionDelay) {
  const int64 kRttMs = 10;
  const int64 kDeviationMs = 3;
  EXPECT_EQ(QuicTime::Delta::Zero(), sender_->RetransmissionDelay());

  sender_->rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(kRttMs),
                                QuicTime::Delta::Zero(), clock_.Now());

  // Initially the deviation is set to half of the initial rtt; the deviation
  // is then multiplied by a factor of 4, and finally the smoothed rtt (which
  // is the initial rtt) is added.
  QuicTime::Delta expected_delay =
      QuicTime::Delta::FromMilliseconds(kRttMs + kRttMs / 2 * 4);
  EXPECT_EQ(expected_delay, sender_->RetransmissionDelay());

  for (int i = 0; i < 100; ++i) {
    // Run to make sure that we converge.
    sender_->rtt_stats_.UpdateRtt(
        QuicTime::Delta::FromMilliseconds(kRttMs + kDeviationMs),
        QuicTime::Delta::Zero(), clock_.Now());
    sender_->rtt_stats_.UpdateRtt(
        QuicTime::Delta::FromMilliseconds(kRttMs - kDeviationMs),
        QuicTime::Delta::Zero(), clock_.Now());
  }
  expected_delay = QuicTime::Delta::FromMilliseconds(kRttMs + kDeviationMs * 4);

  EXPECT_NEAR(kRttMs, sender_->rtt_stats_.smoothed_rtt().ToMilliseconds(), 1);
  EXPECT_NEAR(expected_delay.ToMilliseconds(),
              sender_->RetransmissionDelay().ToMilliseconds(), 1);
  EXPECT_EQ(
      static_cast<int64>(sender_->GetCongestionWindow() * kNumMicrosPerSecond /
                         sender_->rtt_stats_.smoothed_rtt().ToMicroseconds()),
      sender_->BandwidthEstimate().ToBytesPerSecond());
}
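
// Losses within a single congestion window should cause only one window
// reduction; a loss from a later window reduces the window again.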
TEST_F(TcpCubicBytesSenderTest, MultipleLossesInOneWindow) {
  SendAvailableSendWindow();
  const QuicByteCount initial_window = sender_->GetCongestionWindow();
  LosePacket(acked_sequence_number_ + 1);
  const QuicByteCount post_loss_window = sender_->GetCongestionWindow();
  EXPECT_GT(initial_window, post_loss_window);
  LosePacket(acked_sequence_number_ + 3);
  EXPECT_EQ(post_loss_window, sender_->GetCongestionWindow());
  LosePacket(sequence_number_ - 1);
  EXPECT_EQ(post_loss_window, sender_->GetCongestionWindow());

  // Lose a later packet and ensure the window decreases.
  LosePacket(sequence_number_);
  EXPECT_GT(post_loss_window, sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, DontTrackAckPackets) {
  // Send a packet with no retransmittable data, and ensure it's not tracked.
  EXPECT_FALSE(sender_->OnPacketSent(clock_.Now(), bytes_in_flight_,
                                     sequence_number_++, kDefaultTCPMSS,
                                     NO_RETRANSMITTABLE_DATA));

  // Send a data packet with retransmittable data, and ensure it is tracked.
  EXPECT_TRUE(sender_->OnPacketSent(clock_.Now(), bytes_in_flight_,
                                    sequence_number_++, kDefaultTCPMSS,
                                    HAS_RETRANSMITTABLE_DATA));
}

TEST_F(TcpCubicBytesSenderTest, ConfigureMaxInitialWindow) {
  QuicConfig config;

  // Verify that kCOPT: kIW10 forces the congestion window to the default of 10.
  QuicTagVector options;
  options.push_back(kIW10);
  QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
  sender_->SetFromConfig(config, Perspective::IS_SERVER,
                         /* using_pacing= */ false);
  EXPECT_EQ(10u * kDefaultTCPMSS, sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, DisableAckTrainDetectionWithPacing) {
  EXPECT_TRUE(sender_->hybrid_slow_start().ack_train_detection());

  QuicConfig config;
  sender_->SetFromConfig(config, Perspective::IS_SERVER,
                         /* using_pacing= */ true);
  EXPECT_FALSE(sender_->hybrid_slow_start().ack_train_detection());
}

TEST_F(TcpCubicBytesSenderTest, 2ConnectionCongestionAvoidanceAtEndOfRecovery) {
  sender_->SetNumEmulatedConnections(2);
  // Ack 10 packets in 5 acks to raise the CWND to 20.
  const int kNumberOfAcks = 5;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  LoseNPackets(1);

  // We should now have fallen out of slow start with a reduced window.
  expected_send_window = expected_send_window * sender_->GetRenoBeta();
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // No congestion window growth should occur in recovery phase, i.e., until
  // the currently outstanding 20 packets are acked.
  for (int i = 0; i < 10; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    EXPECT_TRUE(sender_->InRecovery());
    AckNPackets(2);
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }
  EXPECT_FALSE(sender_->InRecovery());

  // Out of recovery now. Congestion window should not grow for half an RTT.
  size_t packets_in_send_window = expected_send_window / kDefaultTCPMSS;
  SendAvailableSendWindow();
  AckNPackets(packets_in_send_window / 2 - 2);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Next ack should increase congestion window by 1MSS.
  SendAvailableSendWindow();
  AckNPackets(2);
  expected_send_window += kDefaultTCPMSS;
  packets_in_send_window += 1;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Congestion window should remain steady again for half an RTT.
  SendAvailableSendWindow();
  AckNPackets(packets_in_send_window / 2 - 1);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Next ack should cause congestion window to grow by 1MSS.
  SendAvailableSendWindow();
  AckNPackets(2);
  expected_send_window += kDefaultTCPMSS;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, 1ConnectionCongestionAvoidanceAtEndOfRecovery) {
  sender_->SetNumEmulatedConnections(1);
  // Ack 10 packets in 5 acks to raise the CWND to 20.
  const int kNumberOfAcks = 5;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  LoseNPackets(1);

  // We should now have fallen out of slow start with a reduced window.
  expected_send_window *= kRenoBeta;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // No congestion window growth should occur in recovery phase, i.e., until
  // the currently outstanding 20 packets are acked.
  for (int i = 0; i < 10; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    EXPECT_TRUE(sender_->InRecovery());
    AckNPackets(2);
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }
  EXPECT_FALSE(sender_->InRecovery());

  // Out of recovery now. Congestion window should not grow during RTT.
  for (uint64 i = 0; i < expected_send_window / kDefaultTCPMSS - 2; i += 2) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }

  // Next ack should cause congestion window to grow by 1MSS.
  SendAvailableSendWindow();
  AckNPackets(2);
  expected_send_window += kDefaultTCPMSS;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, BandwidthResumption) {
  // Test that when provided with CachedNetworkParameters and opted in to the
  // bandwidth resumption experiment, the sender sets the initial CWND
  // appropriately.

  // Set some common values.
  CachedNetworkParameters cached_network_params;
  const QuicPacketCount kNumberOfPackets = 123;
  const int kBandwidthEstimateBytesPerSecond =
      kNumberOfPackets * kDefaultTCPMSS;
  cached_network_params.set_bandwidth_estimate_bytes_per_second(
      kBandwidthEstimateBytesPerSecond);
  cached_network_params.set_min_rtt_ms(1000);

  // Ensure that an old estimate is not used for bandwidth resumption.
  cached_network_params.set_timestamp(clock_.WallNow().ToUNIXSeconds() -
                                      (kNumSecondsPerHour + 1));
  EXPECT_FALSE(sender_->ResumeConnectionState(cached_network_params, false));
  EXPECT_EQ(10u * kDefaultTCPMSS, sender_->GetCongestionWindow());

  // If the estimate is new enough, make sure it is used.
  cached_network_params.set_timestamp(clock_.WallNow().ToUNIXSeconds() -
                                      (kNumSecondsPerHour - 1));
  EXPECT_TRUE(sender_->ResumeConnectionState(cached_network_params, false));
  EXPECT_EQ(kNumberOfPackets * kDefaultTCPMSS, sender_->GetCongestionWindow());

  // Resumed CWND is limited to be in a sensible range.
  cached_network_params.set_bandwidth_estimate_bytes_per_second(
      (kMaxTcpCongestionWindow + 1) * kDefaultTCPMSS);
  EXPECT_TRUE(sender_->ResumeConnectionState(cached_network_params, false));
  EXPECT_EQ(kMaxTcpCongestionWindow * kDefaultTCPMSS,
            sender_->GetCongestionWindow());

  cached_network_params.set_bandwidth_estimate_bytes_per_second(
      (kMinCongestionWindowForBandwidthResumption - 1) * kDefaultTCPMSS);
  EXPECT_TRUE(sender_->ResumeConnectionState(cached_network_params, false));
  EXPECT_EQ(kMinCongestionWindowForBandwidthResumption * kDefaultTCPMSS,
            sender_->GetCongestionWindow());

  // Resume with the max bandwidth estimate.
  cached_network_params.set_max_bandwidth_estimate_bytes_per_second(
      (kMinCongestionWindowForBandwidthResumption + 10) * kDefaultTCPMSS);
  EXPECT_TRUE(sender_->ResumeConnectionState(cached_network_params, true));
  EXPECT_EQ((kMinCongestionWindowForBandwidthResumption + 10) * kDefaultTCPMSS,
            sender_->GetCongestionWindow());
}

}  // namespace test
}  // namespace net