net/quic/congestion_control/tcp_cubic_bytes_sender_test.cc
// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/tcp_cubic_bytes_sender.h"

#include <algorithm>

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "net/quic/congestion_control/rtt_stats.h"
#include "net/quic/crypto/crypto_protocol.h"
#include "net/quic/proto/cached_network_parameters.pb.h"
#include "net/quic/quic_protocol.h"
#include "net/quic/quic_utils.h"
#include "net/quic/test_tools/mock_clock.h"
#include "net/quic/test_tools/quic_config_peer.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace net {
namespace test {

// TODO(ianswett): A number of these tests were written with the assumption of
// an initial CWND of 10. They have carefully calculated values which should be
// updated to be based on kInitialCongestionWindowInsecure.
const uint32 kInitialCongestionWindowPackets = 10;
const uint32 kDefaultWindowTCP =
    kInitialCongestionWindowPackets * kDefaultTCPMSS;
const float kRenoBeta = 0.7f;  // Reno backoff factor.

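// Test peer that exposes protected state of TcpCubicBytesSender: it makes
// hybrid_slow_start_ and RenoBeta() visible and owns the RttStats and
// QuicConnectionStats it passes to the base class, so tests can drive and
// inspect them directly.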
class TcpCubicBytesSenderPeer : public TcpCubicBytesSender {
 public:
  TcpCubicBytesSenderPeer(const QuicClock* clock, bool reno)
      : TcpCubicBytesSender(clock,
                            &rtt_stats_,
                            reno,
                            kInitialCongestionWindowPackets,
                            kMaxCongestionWindow,
                            &stats_) {}

  const HybridSlowStart& hybrid_slow_start() const {
    return hybrid_slow_start_;
  }

  float GetRenoBeta() const { return RenoBeta(); }

  RttStats rtt_stats_;
  QuicConnectionStats stats_;
};

class TcpCubicBytesSenderTest : public ::testing::Test {
 protected:
  TcpCubicBytesSenderTest()
      : one_ms_(QuicTime::Delta::FromMilliseconds(1)),
        sender_(new TcpCubicBytesSenderPeer(&clock_, true)),
        sequence_number_(1),
        acked_sequence_number_(0),
        bytes_in_flight_(0) {
    standard_packet_.bytes_sent = kDefaultTCPMSS;
  }

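  // Sends full-sized packets until TimeUntilSend() reports the sender is
  // blocked, and returns the number of packets sent.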
  int SendAvailableSendWindow() {
    // Send as long as TimeUntilSend returns Zero.
    int packets_sent = 0;
    bool can_send = sender_->TimeUntilSend(clock_.Now(), bytes_in_flight_,
                                           HAS_RETRANSMITTABLE_DATA).IsZero();
    while (can_send) {
      sender_->OnPacketSent(clock_.Now(), bytes_in_flight_, sequence_number_++,
                            kDefaultTCPMSS, HAS_RETRANSMITTABLE_DATA);
      ++packets_sent;
      bytes_in_flight_ += kDefaultTCPMSS;
      can_send = sender_->TimeUntilSend(clock_.Now(), bytes_in_flight_,
                                        HAS_RETRANSMITTABLE_DATA).IsZero();
    }
    return packets_sent;
  }

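  // Acks the next n packets in order, feeding the sender a 60ms RTT sample
  // and removing the acked bytes from bytes_in_flight_.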
  // Normally, TCP acks every other segment.
  void AckNPackets(int n) {
    sender_->rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(60),
                                  QuicTime::Delta::Zero(), clock_.Now());
    SendAlgorithmInterface::CongestionVector acked_packets;
    SendAlgorithmInterface::CongestionVector lost_packets;
    for (int i = 0; i < n; ++i) {
      ++acked_sequence_number_;
      acked_packets.push_back(
          std::make_pair(acked_sequence_number_, standard_packet_));
    }
    sender_->OnCongestionEvent(true, bytes_in_flight_, acked_packets,
                               lost_packets);
    bytes_in_flight_ -= n * kDefaultTCPMSS;
    clock_.AdvanceTime(one_ms_);
  }

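  // Marks the next n sequence numbers as lost and removes their bytes from
  // bytes_in_flight_.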
  void LoseNPackets(int n) {
    SendAlgorithmInterface::CongestionVector acked_packets;
    SendAlgorithmInterface::CongestionVector lost_packets;
    for (int i = 0; i < n; ++i) {
      ++acked_sequence_number_;
      lost_packets.push_back(
          std::make_pair(acked_sequence_number_, standard_packet_));
    }
    sender_->OnCongestionEvent(false, bytes_in_flight_, acked_packets,
                               lost_packets);
    bytes_in_flight_ -= n * kDefaultTCPMSS;
  }

  // Does not increment acked_sequence_number_.
  void LosePacket(QuicPacketSequenceNumber sequence_number) {
    SendAlgorithmInterface::CongestionVector acked_packets;
    SendAlgorithmInterface::CongestionVector lost_packets;
    lost_packets.push_back(std::make_pair(sequence_number, standard_packet_));
    sender_->OnCongestionEvent(false, bytes_in_flight_, acked_packets,
                               lost_packets);
    bytes_in_flight_ -= kDefaultTCPMSS;
  }

  const QuicTime::Delta one_ms_;
  MockClock clock_;
  scoped_ptr<TcpCubicBytesSenderPeer> sender_;
  QuicPacketSequenceNumber sequence_number_;
  QuicPacketSequenceNumber acked_sequence_number_;
  QuicByteCount bytes_in_flight_;
  TransmissionInfo standard_packet_;
};

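// Sanity-check the default congestion window and basic can-send behavior.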
TEST_F(TcpCubicBytesSenderTest, SimpleSender) {
  // At startup make sure we are at the default.
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
  // At startup make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  // Make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  // And that window is unaffected.
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());

  // Fill the send window with data, then verify that we can't send.
  SendAvailableSendWindow();
  EXPECT_FALSE(sender_->TimeUntilSend(clock_.Now(),
                                      sender_->GetCongestionWindow(),
                                      HAS_RETRANSMITTABLE_DATA).IsZero());
}

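// Application-limited slow start: per the comments and expectations below,
// only acks that arrive while more than half the CWND is in flight grow the
// window, so 5 acks of 2 packets each are expected to add just 2 * 2 segments.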
TEST_F(TcpCubicBytesSenderTest, ApplicationLimitedSlowStart) {
  // Send exactly 10 packets and ensure the CWND ends at 14 packets.
  const int kNumberOfAcks = 5;
  // At startup make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  // Make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());

  SendAvailableSendWindow();
  for (int i = 0; i < kNumberOfAcks; ++i) {
    AckNPackets(2);
  }
  QuicByteCount bytes_to_send = sender_->GetCongestionWindow();
  // It's expected that 2 acks will arrive while bytes_in_flight is greater
  // than half the CWND.
  EXPECT_EQ(kDefaultWindowTCP + kDefaultTCPMSS * 2 * 2, bytes_to_send);
}

TEST_F(TcpCubicBytesSenderTest, ExponentialSlowStart) {
  const int kNumberOfAcks = 20;
  // At startup make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  EXPECT_EQ(QuicBandwidth::Zero(), sender_->BandwidthEstimate());
  // Make sure we can send.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), 0,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());

  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  const QuicByteCount cwnd = sender_->GetCongestionWindow();
  EXPECT_EQ(kDefaultWindowTCP + kDefaultTCPMSS * 2 * kNumberOfAcks, cwnd);
  EXPECT_EQ(QuicBandwidth::FromBytesAndTimeDelta(
                cwnd, sender_->rtt_stats_.smoothed_rtt()),
            sender_->BandwidthEstimate());
}

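// Grow the window in slow start, then verify that a single loss cuts it by
// kRenoBeta, that the window stays flat until the recovery window is acked,
// and that congestion avoidance then needs a full window of acks per 1 MSS of
// growth.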
TEST_F(TcpCubicBytesSenderTest, SlowStartPacketLoss) {
  sender_->SetNumEmulatedConnections(1);
  const int kNumberOfAcks = 10;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Lose a packet to exit slow start.
  LoseNPackets(1);
  size_t packets_in_recovery_window = expected_send_window / kDefaultTCPMSS;

  // We should now have fallen out of slow start with a reduced window.
  expected_send_window *= kRenoBeta;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Recovery phase. We need to ack every packet in the recovery window before
  // we exit recovery.
  size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
  DVLOG(1) << "number_packets: " << number_of_packets_in_window;
  AckNPackets(packets_in_recovery_window);
  SendAvailableSendWindow();
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // We need to ack an entire window before we increase CWND by 1.
  AckNPackets(number_of_packets_in_window - 2);
  SendAvailableSendWindow();
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Next ack should increase cwnd by 1.
  AckNPackets(1);
  expected_send_window += kDefaultTCPMSS;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Now RTO and ensure slow start gets reset.
  EXPECT_TRUE(sender_->hybrid_slow_start().started());
  sender_->OnRetransmissionTimeout(true);
  EXPECT_FALSE(sender_->hybrid_slow_start().started());
}

TEST_F(TcpCubicBytesSenderTest, NoPRRWhenLessThanOnePacketInFlight) {
  SendAvailableSendWindow();
  LoseNPackets(kInitialCongestionWindowPackets - 1);
  AckNPackets(1);
  // PRR will allow 2 packets for every ack during recovery.
  EXPECT_EQ(2, SendAvailableSendWindow());
  // Simulate abandoning all packets by supplying a bytes_in_flight of 0.
  // PRR should now allow a packet to be sent, even though prr's state
  // variables believe it has sent enough packets.
  EXPECT_EQ(QuicTime::Delta::Zero(),
            sender_->TimeUntilSend(clock_.Now(), 0, HAS_RETRANSMITTABLE_DATA));
}

TEST_F(TcpCubicBytesSenderTest, SlowStartPacketLossPRR) {
  sender_->SetNumEmulatedConnections(1);
  // Test based on the first example in RFC6937.
  // Ack 10 packets in 5 acks to raise the CWND to 20, as in the example.
  const int kNumberOfAcks = 5;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  LoseNPackets(1);

  // We should now have fallen out of slow start with a reduced window.
  size_t send_window_before_loss = expected_send_window;
  expected_send_window *= kRenoBeta;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Testing TCP proportional rate reduction.
  // We should send packets paced over the received acks for the remaining
  // outstanding packets. The number of packets before we exit recovery is the
  // original CWND minus the packet that has been lost and the one which
  // triggered the loss.
  size_t remaining_packets_in_recovery =
      send_window_before_loss / kDefaultTCPMSS - 2;

  for (size_t i = 0; i < remaining_packets_in_recovery; ++i) {
    AckNPackets(1);
    SendAvailableSendWindow();
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }

  // We need to ack another window before we increase CWND by 1.
  size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
  for (size_t i = 0; i < number_of_packets_in_window; ++i) {
    AckNPackets(1);
    EXPECT_EQ(1, SendAvailableSendWindow());
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }

  AckNPackets(1);
  expected_send_window += kDefaultTCPMSS;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, SlowStartBurstPacketLossPRR) {
  sender_->SetNumEmulatedConnections(1);
  // Test based on the second example in RFC6937, though we also implement
  // forward acknowledgements, so the first two incoming acks will trigger
  // PRR immediately.
  // Ack 20 packets in 10 acks to raise the CWND to 30.
  const int kNumberOfAcks = 10;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Lose one more than the congestion window reduction, so that after loss,
  // bytes_in_flight is less than the congestion window.
  size_t send_window_after_loss = kRenoBeta * expected_send_window;
  size_t num_packets_to_lose =
      (expected_send_window - send_window_after_loss) / kDefaultTCPMSS + 1;
  LoseNPackets(num_packets_to_lose);
  // Immediately after the loss, ensure at least one packet can be sent.
  // Losses without subsequent acks can occur with timer based loss detection.
  EXPECT_TRUE(sender_->TimeUntilSend(clock_.Now(), bytes_in_flight_,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  AckNPackets(1);

  // We should now have fallen out of slow start with a reduced window.
  expected_send_window *= kRenoBeta;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Only 2 packets should be allowed to be sent, per PRR-SSRB.
  EXPECT_EQ(2, SendAvailableSendWindow());

  // Ack the next packet, which triggers another loss.
  LoseNPackets(1);
  AckNPackets(1);

  // Send 2 packets to simulate PRR-SSRB.
  EXPECT_EQ(2, SendAvailableSendWindow());

  // Ack the next packet, which triggers another loss.
  LoseNPackets(1);
  AckNPackets(1);

  // Send 2 packets to simulate PRR-SSRB.
  EXPECT_EQ(2, SendAvailableSendWindow());

  // Exit recovery and return to sending at the new rate.
  for (int i = 0; i < kNumberOfAcks; ++i) {
    AckNPackets(1);
    EXPECT_EQ(1, SendAvailableSendWindow());
  }
}

TEST_F(TcpCubicBytesSenderTest, RTOCongestionWindow) {
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
  // Expect the window to decrease to the minimum once the RTO fires and slow
  // start threshold to be set to 1/2 of the CWND.
  sender_->OnRetransmissionTimeout(true);
  EXPECT_EQ(2 * kDefaultTCPMSS, sender_->GetCongestionWindow());
  EXPECT_EQ(5u * kDefaultTCPMSS, sender_->GetSlowStartThreshold());
}

TEST_F(TcpCubicBytesSenderTest, RTOCongestionWindowNoRetransmission) {
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());

  // Expect the window to remain unchanged if the RTO fires but no packets are
  // retransmitted.
  sender_->OnRetransmissionTimeout(false);
  EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
}

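// The retransmission delay is seeded from the first RTT sample (10ms gives
// 10 + (10 / 2) * 4 = 30ms) and should converge towards the smoothed RTT plus
// 4 times the RTT deviation (10 + 3 * 4 = 22ms) as samples accumulate.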
TEST_F(TcpCubicBytesSenderTest, RetransmissionDelay) {
  const int64 kRttMs = 10;
  const int64 kDeviationMs = 3;
  EXPECT_EQ(QuicTime::Delta::Zero(), sender_->RetransmissionDelay());

  sender_->rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(kRttMs),
                                QuicTime::Delta::Zero(), clock_.Now());

  // Initially the median deviation is set to half of the initial rtt; the
  // median is then multiplied by a factor of 4 and finally the smoothed rtt
  // is added, which is the initial rtt.
  QuicTime::Delta expected_delay =
      QuicTime::Delta::FromMilliseconds(kRttMs + kRttMs / 2 * 4);
  EXPECT_EQ(expected_delay, sender_->RetransmissionDelay());

  for (int i = 0; i < 100; ++i) {
    // Run to make sure that we converge.
    sender_->rtt_stats_.UpdateRtt(
        QuicTime::Delta::FromMilliseconds(kRttMs + kDeviationMs),
        QuicTime::Delta::Zero(), clock_.Now());
    sender_->rtt_stats_.UpdateRtt(
        QuicTime::Delta::FromMilliseconds(kRttMs - kDeviationMs),
        QuicTime::Delta::Zero(), clock_.Now());
  }
  expected_delay = QuicTime::Delta::FromMilliseconds(kRttMs + kDeviationMs * 4);

  EXPECT_NEAR(kRttMs, sender_->rtt_stats_.smoothed_rtt().ToMilliseconds(), 1);
  EXPECT_NEAR(expected_delay.ToMilliseconds(),
              sender_->RetransmissionDelay().ToMilliseconds(), 1);
  EXPECT_EQ(
      static_cast<int64>(sender_->GetCongestionWindow() * kNumMicrosPerSecond /
                         sender_->rtt_stats_.smoothed_rtt().ToMicroseconds()),
      sender_->BandwidthEstimate().ToBytesPerSecond());
}

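// Verify that only one window reduction occurs per loss window: further
// losses of packets sent before the reduction leave the window unchanged,
// while losing a packet sent after it reduces the window again.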
TEST_F(TcpCubicBytesSenderTest, MultipleLossesInOneWindow) {
  SendAvailableSendWindow();
  const QuicByteCount initial_window = sender_->GetCongestionWindow();
  LosePacket(acked_sequence_number_ + 1);
  const QuicByteCount post_loss_window = sender_->GetCongestionWindow();
  EXPECT_GT(initial_window, post_loss_window);
  LosePacket(acked_sequence_number_ + 3);
  EXPECT_EQ(post_loss_window, sender_->GetCongestionWindow());
  LosePacket(sequence_number_ - 1);
  EXPECT_EQ(post_loss_window, sender_->GetCongestionWindow());

  // Lose a later packet and ensure the window decreases.
  LosePacket(sequence_number_);
  EXPECT_GT(post_loss_window, sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, DontTrackAckPackets) {
  // Send a packet with no retransmittable data, and ensure it's not tracked.
  EXPECT_FALSE(sender_->OnPacketSent(clock_.Now(), bytes_in_flight_,
                                     sequence_number_++, kDefaultTCPMSS,
                                     NO_RETRANSMITTABLE_DATA));

  // Send a data packet with retransmittable data, and ensure it is tracked.
  EXPECT_TRUE(sender_->OnPacketSent(clock_.Now(), bytes_in_flight_,
                                    sequence_number_++, kDefaultTCPMSS,
                                    HAS_RETRANSMITTABLE_DATA));
}

TEST_F(TcpCubicBytesSenderTest, ConfigureMaxInitialWindow) {
  QuicConfig config;

  // Verify that kCOPT: kIW10 forces the congestion window to the default of
  // 10.
  QuicTagVector options;
  options.push_back(kIW10);
  QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
  sender_->SetFromConfig(config, Perspective::IS_SERVER);
  EXPECT_EQ(10u * kDefaultTCPMSS, sender_->GetCongestionWindow());
}

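// With two emulated connections the backoff after loss uses the N-connection
// factor exposed via GetRenoBeta() rather than the single-connection
// kRenoBeta.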
TEST_F(TcpCubicBytesSenderTest, 2ConnectionCongestionAvoidanceAtEndOfRecovery) {
  sender_->SetNumEmulatedConnections(2);
  // Ack 10 packets in 5 acks to raise the CWND to 20.
  const int kNumberOfAcks = 5;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  LoseNPackets(1);

  // We should now have fallen out of slow start with a reduced window.
  expected_send_window = expected_send_window * sender_->GetRenoBeta();
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // No congestion window growth should occur in recovery phase, i.e., until
  // the currently outstanding 20 packets are acked.
  for (int i = 0; i < 10; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    EXPECT_TRUE(sender_->InRecovery());
    AckNPackets(2);
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }
  EXPECT_FALSE(sender_->InRecovery());

  // Out of recovery now. Congestion window should not grow for half an RTT.
  size_t packets_in_send_window = expected_send_window / kDefaultTCPMSS;
  SendAvailableSendWindow();
  AckNPackets(packets_in_send_window / 2 - 2);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Next ack should increase congestion window by 1MSS.
  SendAvailableSendWindow();
  AckNPackets(2);
  expected_send_window += kDefaultTCPMSS;
  packets_in_send_window += 1;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Congestion window should remain steady again for half an RTT.
  SendAvailableSendWindow();
  AckNPackets(packets_in_send_window / 2 - 1);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // Next ack should cause congestion window to grow by 1MSS.
  SendAvailableSendWindow();
  AckNPackets(2);
  expected_send_window += kDefaultTCPMSS;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, 1ConnectionCongestionAvoidanceAtEndOfRecovery) {
  sender_->SetNumEmulatedConnections(1);
  // Ack 10 packets in 5 acks to raise the CWND to 20.
  const int kNumberOfAcks = 5;
  for (int i = 0; i < kNumberOfAcks; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
  }
  SendAvailableSendWindow();
  QuicByteCount expected_send_window =
      kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  LoseNPackets(1);

  // We should now have fallen out of slow start with a reduced window.
  expected_send_window *= kRenoBeta;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());

  // No congestion window growth should occur in recovery phase, i.e., until
  // the currently outstanding 20 packets are acked.
  for (int i = 0; i < 10; ++i) {
    // Send our full send window.
    SendAvailableSendWindow();
    EXPECT_TRUE(sender_->InRecovery());
    AckNPackets(2);
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }
  EXPECT_FALSE(sender_->InRecovery());

  // Out of recovery now. Congestion window should not grow during RTT.
  for (uint64 i = 0; i < expected_send_window / kDefaultTCPMSS - 2; i += 2) {
    // Send our full send window.
    SendAvailableSendWindow();
    AckNPackets(2);
    EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
  }

  // Next ack should cause congestion window to grow by 1MSS.
  SendAvailableSendWindow();
  AckNPackets(2);
  expected_send_window += kDefaultTCPMSS;
  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
}

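// Per the expectations below, the resumed CWND corresponds to
// bandwidth_estimate * min_rtt (123 * kDefaultTCPMSS bytes/s over a 1 second
// RTT, i.e. 123 full-sized segments), clamped between
// kMinCongestionWindowForBandwidthResumption and kMaxCongestionWindow.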
TEST_F(TcpCubicBytesSenderTest, BandwidthResumption) {
  // Test that when provided with CachedNetworkParameters and opted in to the
  // bandwidth resumption experiment, the TcpCubicBytesSender sets the initial
  // CWND appropriately.

  // Set some common values.
  CachedNetworkParameters cached_network_params;
  const QuicPacketCount kNumberOfPackets = 123;
  const int kBandwidthEstimateBytesPerSecond =
      kNumberOfPackets * kDefaultTCPMSS;
  cached_network_params.set_bandwidth_estimate_bytes_per_second(
      kBandwidthEstimateBytesPerSecond);
  cached_network_params.set_min_rtt_ms(1000);

  // Make sure that a bandwidth estimate results in a changed CWND.
  cached_network_params.set_timestamp(clock_.WallNow().ToUNIXSeconds() -
                                      (kNumSecondsPerHour - 1));
  sender_->ResumeConnectionState(cached_network_params, false);
  EXPECT_EQ(kNumberOfPackets * kDefaultTCPMSS, sender_->GetCongestionWindow());

  // Resumed CWND is limited to be in a sensible range.
  cached_network_params.set_bandwidth_estimate_bytes_per_second(
      (kMaxCongestionWindow + 1) * kDefaultTCPMSS);
  sender_->ResumeConnectionState(cached_network_params, false);
  EXPECT_EQ(kMaxCongestionWindow * kDefaultTCPMSS,
            sender_->GetCongestionWindow());

  cached_network_params.set_bandwidth_estimate_bytes_per_second(
      (kMinCongestionWindowForBandwidthResumption - 1) * kDefaultTCPMSS);
  sender_->ResumeConnectionState(cached_network_params, false);
  EXPECT_EQ(kMinCongestionWindowForBandwidthResumption * kDefaultTCPMSS,
            sender_->GetCongestionWindow());

  // Resume to the max value.
  cached_network_params.set_max_bandwidth_estimate_bytes_per_second(
      (kMinCongestionWindowForBandwidthResumption + 10) * kDefaultTCPMSS);
  sender_->ResumeConnectionState(cached_network_params, true);
  EXPECT_EQ((kMinCongestionWindowForBandwidthResumption + 10) * kDefaultTCPMSS,
            sender_->GetCongestionWindow());
}

TEST_F(TcpCubicBytesSenderTest, PaceBelowCWND) {
  QuicConfig config;

  // Verify that kCOPT: kMIN4 forces the min CWND to 1 packet, but allows up
  // to 4 to be sent.
  QuicTagVector options;
  options.push_back(kMIN4);
  QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
  sender_->SetFromConfig(config, Perspective::IS_SERVER);
  sender_->OnRetransmissionTimeout(true);
  EXPECT_EQ(kDefaultTCPMSS, sender_->GetCongestionWindow());
  EXPECT_TRUE(sender_->TimeUntilSend(QuicTime::Zero(), kDefaultTCPMSS,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  EXPECT_TRUE(sender_->TimeUntilSend(QuicTime::Zero(), 2 * kDefaultTCPMSS,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  EXPECT_TRUE(sender_->TimeUntilSend(QuicTime::Zero(), 3 * kDefaultTCPMSS,
                                     HAS_RETRANSMITTABLE_DATA).IsZero());
  EXPECT_FALSE(sender_->TimeUntilSend(QuicTime::Zero(), 4 * kDefaultTCPMSS,
                                      HAS_RETRANSMITTABLE_DATA).IsZero());
}

}  // namespace test
}  // namespace net