// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/reliable_quic_stream.h"

#include "base/logging.h"
#include "base/profiler/scoped_tracker.h"
#include "net/quic/iovector.h"
#include "net/quic/quic_flow_controller.h"
#include "net/quic/quic_session.h"
#include "net/quic/quic_write_blocked_list.h"

using base::StringPiece;
using std::min;
using std::string;

namespace net {

#define ENDPOINT (is_server_ ? "Server: " : " Client: ")

namespace {

struct iovec MakeIovec(StringPiece data) {
  struct iovec iov = {const_cast<char*>(data.data()),
                      static_cast<size_t>(data.size())};
  return iov;
}

size_t GetInitialStreamFlowControlWindowToSend(QuicSession* session) {
  return session->config()->GetInitialStreamFlowControlWindowToSend();
}

size_t GetReceivedFlowControlWindow(QuicSession* session) {
  if (session->config()->HasReceivedInitialStreamFlowControlWindowBytes()) {
    return session->config()->ReceivedInitialStreamFlowControlWindowBytes();
  }

  return kMinimumFlowControlSendWindow;
}

}  // namespace

// Wrapper that aggregates OnAckNotifications for packets sent using
// WriteOrBufferData and delivers them to the original
// QuicAckNotifier::DelegateInterface after all bytes written using
// WriteOrBufferData are acked. This level of indirection is
// necessary because the delegate interface provides no mechanism that
// WriteOrBufferData can use to inform it that the write required
// multiple WritevData calls or that only part of the data has been
// sent out by the time ACKs start arriving.
class ReliableQuicStream::ProxyAckNotifierDelegate
    : public QuicAckNotifier::DelegateInterface {
 public:
  explicit ProxyAckNotifierDelegate(DelegateInterface* delegate)
      : delegate_(delegate),
        pending_acks_(0),
        wrote_last_data_(false),
        num_retransmitted_packets_(0),
        num_retransmitted_bytes_(0) {
  }

  void OnAckNotification(int num_retransmitted_packets,
                         int num_retransmitted_bytes,
                         QuicTime::Delta delta_largest_observed) override {
    DCHECK_LT(0, pending_acks_);
    --pending_acks_;
    num_retransmitted_packets_ += num_retransmitted_packets;
    num_retransmitted_bytes_ += num_retransmitted_bytes;

    if (wrote_last_data_ && pending_acks_ == 0) {
      delegate_->OnAckNotification(num_retransmitted_packets_,
                                   num_retransmitted_bytes_,
                                   delta_largest_observed);
    }
  }

  void WroteData(bool last_data) {
    DCHECK(!wrote_last_data_);
    ++pending_acks_;
    wrote_last_data_ = last_data;
  }

 protected:
  // Delegates are ref counted.
  ~ProxyAckNotifierDelegate() override {}

 private:
  // Original delegate. delegate_->OnAckNotification will be called when:
  //   wrote_last_data_ == true and pending_acks_ == 0
  scoped_refptr<DelegateInterface> delegate_;

  // Number of outstanding acks.
  int pending_acks_;

  // True if no pending writes remain.
  bool wrote_last_data_;

  // Accumulators.
  int num_original_packets_;
  int num_original_bytes_;
  int num_retransmitted_packets_;
  int num_retransmitted_bytes_;

  DISALLOW_COPY_AND_ASSIGN(ProxyAckNotifierDelegate);
};
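
// Note: WriteOrBufferData() and OnCanWrite() below call WroteData() once for
// each WritevData() call that consumes bytes, passing last_data == true only
// for the call that writes the final piece of that WriteOrBufferData request.
// The proxy then forwards the aggregated retransmission counts to the wrapped
// delegate exactly once, after every one of those writes has been acked.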

ReliableQuicStream::PendingData::PendingData(
    string data_in, scoped_refptr<ProxyAckNotifierDelegate> delegate_in)
    : data(data_in), delegate(delegate_in) {
}

ReliableQuicStream::PendingData::~PendingData() {
}
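
// The stream flow controller below is seeded from the handshake config: the
// window the peer advertised (GetReceivedFlowControlWindow, which falls back
// to kMinimumFlowControlSendWindow) and the window we advertise to the peer
// (GetInitialStreamFlowControlWindowToSend). The exact parameter order is an
// assumption based on the helper names; quic_flow_controller.h has the
// authoritative signature.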

ReliableQuicStream::ReliableQuicStream(QuicStreamId id, QuicSession* session)
    : sequencer_(this),
      id_(id),
      session_(session),
      stream_bytes_read_(0),
      stream_bytes_written_(0),
      stream_error_(QUIC_STREAM_NO_ERROR),
      connection_error_(QUIC_NO_ERROR),
      read_side_closed_(false),
      write_side_closed_(false),
      fin_buffered_(false),
      fin_sent_(false),
      fin_received_(false),
      rst_sent_(false),
      rst_received_(false),
      fec_policy_(FEC_PROTECT_OPTIONAL),
      is_server_(session_->is_server()),
      flow_controller_(
          session_->connection(), id_, is_server_,
          GetReceivedFlowControlWindow(session),
          GetInitialStreamFlowControlWindowToSend(session),
          GetInitialStreamFlowControlWindowToSend(session)),
      connection_flow_controller_(session_->flow_controller()),
      stream_contributes_to_connection_flow_control_(true) {
}

ReliableQuicStream::~ReliableQuicStream() {
}

void ReliableQuicStream::OnStreamFrame(const QuicStreamFrame& frame) {
  if (read_side_closed_) {
    DVLOG(1) << ENDPOINT << "Ignoring frame " << frame.stream_id;
    // We don't want to be reading: blackhole the data.
    return;
  }

  if (frame.stream_id != id_) {
    session_->connection()->SendConnectionClose(QUIC_INTERNAL_ERROR);
    return;
  }

  if (frame.fin) {
    fin_received_ = true;
  }

  // This count includes duplicate data received.
  size_t frame_payload_size = frame.data.TotalBufferSize();
  stream_bytes_read_ += frame_payload_size;

  // Flow control is interested in tracking the highest received offset.
  if (MaybeIncreaseHighestReceivedOffset(frame.offset + frame_payload_size)) {
    // As the highest received offset has changed, we should check to see if
    // this is a violation of flow control.
    if (flow_controller_.FlowControlViolation() ||
        connection_flow_controller_->FlowControlViolation()) {
      session_->connection()->SendConnectionClose(
          QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA);
      return;
    }
  }

  sequencer_.OnStreamFrame(frame);
}

int ReliableQuicStream::num_frames_received() const {
  return sequencer_.num_frames_received();
}

int ReliableQuicStream::num_early_frames_received() const {
  return sequencer_.num_early_frames_received();
}

int ReliableQuicStream::num_duplicate_frames_received() const {
  return sequencer_.num_duplicate_frames_received();
}

void ReliableQuicStream::OnStreamReset(const QuicRstStreamFrame& frame) {
  rst_received_ = true;
  MaybeIncreaseHighestReceivedOffset(frame.byte_offset);

  stream_error_ = frame.error_code;
  CloseWriteSide();
  CloseReadSide();
}

void ReliableQuicStream::OnConnectionClosed(QuicErrorCode error,
                                            bool from_peer) {
  if (read_side_closed_ && write_side_closed_) {
    return;
  }
  if (error != QUIC_NO_ERROR) {
    stream_error_ = QUIC_STREAM_CONNECTION_ERROR;
    connection_error_ = error;
  }

  CloseWriteSide();
  CloseReadSide();
}

void ReliableQuicStream::OnFinRead() {
  DCHECK(sequencer_.IsClosed());
  fin_received_ = true;
  CloseReadSide();
}

void ReliableQuicStream::Reset(QuicRstStreamErrorCode error) {
  DCHECK_NE(QUIC_STREAM_NO_ERROR, error);
  stream_error_ = error;
  // Sending a RstStream results in calling CloseStream.
  session()->SendRstStream(id(), error, stream_bytes_written_);
  rst_sent_ = true;
}

void ReliableQuicStream::CloseConnection(QuicErrorCode error) {
  // TODO(vadimt): Remove ScopedTracker below once crbug.com/422516 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "422516 ReliableQuicStream::CloseConnection"));

  session()->connection()->SendConnectionClose(error);
}

void ReliableQuicStream::CloseConnectionWithDetails(QuicErrorCode error,
                                                    const string& details) {
  session()->connection()->SendConnectionCloseWithDetails(error, details);
}

QuicVersion ReliableQuicStream::version() const {
  return session()->connection()->version();
}
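
// WriteOrBufferData() writes as much of |data| as flow control and the
// connection currently allow and queues the remainder (plus any unconsumed
// fin) in |queued_data_| to be retried from OnCanWrite(). A hypothetical
// caller, assuming a ReliableQuicStream* |stream|, might look like:
//
//   stream->WriteOrBufferData("response body", /*fin=*/true, nullptr);
//
// Passing an ack notifier delegate wraps it in a ProxyAckNotifierDelegate so
// that a single notification fires after every written byte has been acked.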

void ReliableQuicStream::WriteOrBufferData(
    StringPiece data,
    bool fin,
    QuicAckNotifier::DelegateInterface* ack_notifier_delegate) {
  if (data.empty() && !fin) {
    LOG(DFATAL) << "data.empty() && !fin";
    return;
  }

  if (fin_buffered_) {
    LOG(DFATAL) << "Fin already buffered";
    return;
  }

  scoped_refptr<ProxyAckNotifierDelegate> proxy_delegate;
  if (ack_notifier_delegate != nullptr) {
    proxy_delegate = new ProxyAckNotifierDelegate(ack_notifier_delegate);
  }

  QuicConsumedData consumed_data(0, false);
  fin_buffered_ = fin;

  if (queued_data_.empty()) {
    struct iovec iov(MakeIovec(data));
    consumed_data = WritevData(&iov, 1, fin, proxy_delegate.get());
    DCHECK_LE(consumed_data.bytes_consumed, data.length());
  }

  bool write_completed;
  // If there's unconsumed data or an unconsumed fin, queue it.
  if (consumed_data.bytes_consumed < data.length() ||
      (fin && !consumed_data.fin_consumed)) {
    StringPiece remainder(data.substr(consumed_data.bytes_consumed));
    queued_data_.push_back(PendingData(remainder.as_string(), proxy_delegate));
    write_completed = false;
  } else {
    write_completed = true;
  }

  if ((proxy_delegate.get() != nullptr) &&
      (consumed_data.bytes_consumed > 0 || consumed_data.fin_consumed)) {
    proxy_delegate->WroteData(write_completed);
  }
}
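
// OnCanWrite() drains |queued_data_| in FIFO order: the buffered fin is only
// attached to the final queued chunk, a fully written chunk reports
// WroteData(true) to its proxy delegate, and the loop stops at the first
// chunk that cannot be written in full (trimming off whatever was consumed).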

void ReliableQuicStream::OnCanWrite() {
  bool fin = false;
  while (!queued_data_.empty()) {
    PendingData* pending_data = &queued_data_.front();
    ProxyAckNotifierDelegate* delegate = pending_data->delegate.get();
    if (queued_data_.size() == 1 && fin_buffered_) {
      fin = true;
    }
    struct iovec iov(MakeIovec(pending_data->data));
    QuicConsumedData consumed_data = WritevData(&iov, 1, fin, delegate);
    if (consumed_data.bytes_consumed == pending_data->data.size() &&
        fin == consumed_data.fin_consumed) {
      queued_data_.pop_front();
      if (delegate != nullptr) {
        delegate->WroteData(true);
      }
    } else {
      if (consumed_data.bytes_consumed > 0) {
        pending_data->data.erase(0, consumed_data.bytes_consumed);
        if (delegate != nullptr) {
          delegate->WroteData(false);
        }
      }
      break;
    }
  }
}
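
// MaybeSendBlocked() lets both flow controllers emit BLOCKED frames when they
// are at their limits. Only the "connection blocked but stream unblocked"
// case is added to the write blocked list here: a stream level WINDOW_UPDATE
// unblocks this stream directly via OnWindowUpdateFrame() -> OnCanWrite(),
// whereas a connection level WINDOW_UPDATE is presumably dispatched by the
// session to the streams it has marked as write blocked.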

void ReliableQuicStream::MaybeSendBlocked() {
  flow_controller_.MaybeSendBlocked();
  if (!stream_contributes_to_connection_flow_control_) {
    return;
  }
  connection_flow_controller_->MaybeSendBlocked();
  // If we are connection level flow control blocked, then add the stream
  // to the write blocked list. It will be given a chance to write when a
  // connection level WINDOW_UPDATE arrives.
  if (connection_flow_controller_->IsBlocked() &&
      !flow_controller_.IsBlocked()) {
    session_->MarkWriteBlocked(id(), EffectivePriority());
  }
}
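
// WritevData() clamps the write to the smaller of the stream and connection
// send windows (when this stream contributes to connection flow control),
// drops the fin if the data had to be truncated, and lets a zero length
// fin-only write bypass flow control entirely.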

QuicConsumedData ReliableQuicStream::WritevData(
    const struct iovec* iov,
    int iov_count,
    bool fin,
    QuicAckNotifier::DelegateInterface* ack_notifier_delegate) {
  if (write_side_closed_) {
    DLOG(ERROR) << ENDPOINT << "Attempt to write when the write side is closed";
    return QuicConsumedData(0, false);
  }

  // How much data we want to write.
  size_t write_length = TotalIovecLength(iov, iov_count);

  // A FIN with zero data payload should not be flow control blocked.
  bool fin_with_zero_data = (fin && write_length == 0);

  if (flow_controller_.IsEnabled()) {
    // How much data we are allowed to write from flow control.
    QuicByteCount send_window = flow_controller_.SendWindowSize();
    if (stream_contributes_to_connection_flow_control_) {
      send_window =
          min(send_window, connection_flow_controller_->SendWindowSize());
    }

    if (send_window == 0 && !fin_with_zero_data) {
      // Quick return if we can't send anything.
      MaybeSendBlocked();
      return QuicConsumedData(0, false);
    }

    if (write_length > send_window) {
      // Don't send the FIN if we aren't going to send all the data.
      fin = false;

      // Writing more data would be a violation of flow control.
      write_length = static_cast<size_t>(send_window);
    }
  }

  // Fill an IOVector with bytes from the iovec.
  IOVector data;
  data.AppendIovecAtMostBytes(iov, iov_count, write_length);

  QuicConsumedData consumed_data = session()->WritevData(
      id(), data, stream_bytes_written_, fin, GetFecProtection(),
      ack_notifier_delegate);
  stream_bytes_written_ += consumed_data.bytes_consumed;

  AddBytesSent(consumed_data.bytes_consumed);

  if (consumed_data.bytes_consumed == write_length) {
    if (!fin_with_zero_data) {
      MaybeSendBlocked();
    }
    if (fin && consumed_data.fin_consumed) {
      fin_sent_ = true;
      CloseWriteSide();
    } else if (fin && !consumed_data.fin_consumed) {
      session_->MarkWriteBlocked(id(), EffectivePriority());
    }
  } else {
    session_->MarkWriteBlocked(id(), EffectivePriority());
  }
  return consumed_data;
}

FecProtection ReliableQuicStream::GetFecProtection() {
  return fec_policy_ == FEC_PROTECT_ALWAYS ? MUST_FEC_PROTECT : MAY_FEC_PROTECT;
}

void ReliableQuicStream::CloseReadSide() {
  if (read_side_closed_) {
    return;
  }
  DVLOG(1) << ENDPOINT << "Done reading from stream " << id();

  read_side_closed_ = true;
  if (write_side_closed_) {
    DVLOG(1) << ENDPOINT << "Closing stream: " << id();
    session_->CloseStream(id());
  }
}

void ReliableQuicStream::CloseWriteSide() {
  if (write_side_closed_) {
    return;
  }
  DVLOG(1) << ENDPOINT << "Done writing to stream " << id();

  write_side_closed_ = true;
  if (read_side_closed_) {
    DVLOG(1) << ENDPOINT << "Closing stream: " << id();
    session_->CloseStream(id());
  }
}

bool ReliableQuicStream::HasBufferedData() const {
  return !queued_data_.empty();
}

void ReliableQuicStream::OnClose() {
  CloseReadSide();
  CloseWriteSide();

  if (!fin_sent_ && !rst_sent_) {
    // For flow control accounting, we must tell the peer how many bytes we
    // have written on this stream before termination. Done here if needed,
    // using a RST frame.
    DVLOG(1) << ENDPOINT << "Sending RST in OnClose: " << id();
    session_->SendRstStream(id(), QUIC_RST_ACKNOWLEDGEMENT,
                            stream_bytes_written_);
    rst_sent_ = true;
  }

  // We are closing the stream and will not process any further incoming bytes.
  // As there may be more bytes in flight and we need to ensure that both
  // endpoints have the same connection level flow control state, mark all
  // unreceived or buffered bytes as consumed.
  QuicByteCount bytes_to_consume =
      flow_controller_.highest_received_byte_offset() -
      flow_controller_.bytes_consumed();
  AddBytesConsumed(bytes_to_consume);
}

void ReliableQuicStream::OnWindowUpdateFrame(
    const QuicWindowUpdateFrame& frame) {
  if (!flow_controller_.IsEnabled()) {
    DLOG(DFATAL) << "Flow control not enabled! " << version();
    return;
  }

  if (flow_controller_.UpdateSendWindowOffset(frame.byte_offset)) {
    // We can write again!
    // TODO(rjshade): This does not respect priorities (e.g. multiple
    //                outstanding POSTs are unblocked on arrival of
    //                SHLO with initial window).
    // As long as the connection is not flow control blocked, we can write!
    OnCanWrite();
  }
}

bool ReliableQuicStream::MaybeIncreaseHighestReceivedOffset(
    QuicStreamOffset new_offset) {
  if (!flow_controller_.IsEnabled()) {
    return false;
  }
  uint64 increment =
      new_offset - flow_controller_.highest_received_byte_offset();
  if (!flow_controller_.UpdateHighestReceivedOffset(new_offset)) {
    return false;
  }

  // If |new_offset| increased the stream flow controller's highest received
  // offset, then we need to increase the connection flow controller's value
  // by the incremental difference.
  if (stream_contributes_to_connection_flow_control_) {
    connection_flow_controller_->UpdateHighestReceivedOffset(
        connection_flow_controller_->highest_received_byte_offset() +
        increment);
  }
  return true;
}
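
// Sent and consumed byte counts are mirrored into the connection level flow
// controller whenever stream_contributes_to_connection_flow_control_ is true,
// keeping the connection wide accounting consistent with the per-stream one.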

void ReliableQuicStream::AddBytesSent(QuicByteCount bytes) {
  if (flow_controller_.IsEnabled()) {
    flow_controller_.AddBytesSent(bytes);
    if (stream_contributes_to_connection_flow_control_) {
      connection_flow_controller_->AddBytesSent(bytes);
    }
  }
}

void ReliableQuicStream::AddBytesConsumed(QuicByteCount bytes) {
  if (flow_controller_.IsEnabled()) {
    // Only adjust stream level flow controller if we are still reading.
    if (!read_side_closed_) {
      flow_controller_.AddBytesConsumed(bytes);
    }

    if (stream_contributes_to_connection_flow_control_) {
      connection_flow_controller_->AddBytesConsumed(bytes);
    }
  }
}

void ReliableQuicStream::UpdateSendWindowOffset(QuicStreamOffset new_window) {
  if (flow_controller_.UpdateSendWindowOffset(new_window)) {
    OnCanWrite();
  }
}

bool ReliableQuicStream::IsFlowControlBlocked() {
  if (flow_controller_.IsBlocked()) {
    return true;
  }
  return stream_contributes_to_connection_flow_control_ &&
         connection_flow_controller_->IsBlocked();
}

}  // namespace net