// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	struct sctp_stream_out_ext *oute;
	__u16 stream;

	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;

	stream = sctp_chunk_stream_no(ch);
	oute = SCTP_SO(&q->asoc->stream, stream)->ext;
	list_add(&ch->stream_list, &oute->outq);
}
/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	return q->sched->dequeue(q);
}
/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	struct sctp_stream_out_ext *oute;
	__u16 stream;

	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;

	stream = sctp_chunk_stream_no(ch);
	oute = SCTP_SO(&q->asoc->stream, stream)->ext;
	list_add_tail(&ch->stream_list, &oute->outq);
}
/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}
/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}
/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}
/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}
/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
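/* Worked example (illustrative): with CHANGEOVER_ACTIVE set and
 * CYCLING_CHANGEOVER clear, a SACK that yields count_of_newacks == 2 for a
 * TSN t that was sent to a non-primary transport matches step D above, so
 * sctp_cacc_skip() returns 1 and the missing report count for t is left
 * untouched.
 */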
/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	memset(q, 0, sizeof(struct sctp_outq));

	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);
	sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss);
}
/* Free the outqueue structure and any related pending chunks.
 */
static void __sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			    transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
		sctp_sched_dequeue_done(q, chunk);

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}
/* Free the outqueue structure and any related pending chunks.  */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);
}
/* Put a new chunk in an sctp_outq.  */
void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
{
	struct net *net = sock_net(q->asoc->base.sk);

	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
		 chunk && chunk->chunk_hdr ?
		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
		 "illegal chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
			 __func__, q, chunk, chunk && chunk->chunk_hdr ?
			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
			 "illegal chunk");

		sctp_outq_tail_data(q, chunk);
		if (chunk->asoc->peer.prsctp_capable &&
		    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
			chunk->asoc->sent_cnt_removable++;
		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
			SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
		else
			SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (!q->cork)
		sctp_outq_flush(q, 0, gfp);
}
/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}
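/* For example (illustrative), inserting a chunk with TSN 105 into a list
 * already holding TSNs 101, 103 and 107 walks past 101 and 103, stops at 107
 * (the first TSN greater than 105) and links the new entry just before it,
 * keeping the list in ascending TSN order.
 */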
static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
				  struct sctp_sndrcvinfo *sinfo,
				  struct list_head *queue, int msg_len)
{
	struct sctp_chunk *chk, *temp;

	list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
		struct sctp_stream_out *streamout;

		if (!chk->msg->abandoned &&
		    (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
			continue;

		chk->msg->abandoned = 1;
		list_del_init(&chk->transmitted_list);
		sctp_insert_list(&asoc->outqueue.abandoned,
				 &chk->transmitted_list);

		streamout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
		asoc->sent_cnt_removable--;
		asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
		streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;

		if (queue != &asoc->outqueue.retransmit &&
		    !chk->tsn_gap_acked) {
			if (chk->transport)
				chk->transport->flight_size -=
						sctp_data_size(chk);
			asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
		}

		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
		if (msg_len <= 0)
			break;
	}

	return msg_len;
}
static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
				    struct sctp_sndrcvinfo *sinfo, int msg_len)
{
	struct sctp_outq *q = &asoc->outqueue;
	struct sctp_chunk *chk, *temp;

	q->sched->unsched_all(&asoc->stream);

	list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
		if (!chk->msg->abandoned &&
		    (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
		     !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
			continue;

		chk->msg->abandoned = 1;
		sctp_sched_dequeue_common(q, chk);
		asoc->sent_cnt_removable--;
		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
		if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
			struct sctp_stream_out *streamout =
				SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);

			streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
		}

		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
		sctp_chunk_free(chk);
		if (msg_len <= 0)
			break;
	}

	q->sched->sched_all(&asoc->stream);

	return msg_len;
}
/* Abandon the chunks according to their priorities */
void sctp_prsctp_prune(struct sctp_association *asoc,
		       struct sctp_sndrcvinfo *sinfo, int msg_len)
{
	struct sctp_transport *transport;

	if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
		return;

	msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
					 &asoc->outqueue.retransmit,
					 msg_len);
	if (msg_len <= 0)
		return;

	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {
		msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
						 &transport->transmitted,
						 msg_len);
		if (msg_len <= 0)
			return;
	}

	sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
}
/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it as 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += sctp_data_size(chunk);
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX &&
		     (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
		 transport->cwnd, transport->ssthresh, transport->flight_size,
		 transport->partial_bytes_acked);
}
/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     enum sctp_retransmit_reason reason)
{
	struct net *net = sock_net(q->asoc->base.sk);

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	default:
		BUG();
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
}
/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * __sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
				 int rtx_timeout, int *start_timer, gfp_t gfp)
{
	struct sctp_transport *transport = pkt->transport;
	struct sctp_chunk *chunk, *chunk1;
	struct list_head *lqueue;
	enum sctp_xmit status;
	int error = 0;
	int timer = 0;
	int done = 0;
	int fast_rtx;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]).  Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore chunks that are
		 * not marked for fast retransmit.
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt, gfp);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt, gfp);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt, gfp);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt, gfp);

			/* Stop sending DATA because of nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}
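/* Note: corking is driven by the q->cork flag.  While it is set,
 * sctp_outq_tail() only queues chunks instead of flushing them; a later call
 * to sctp_outq_uncork() clears the flag and performs one combined flush,
 * which gives chunk bundling a chance to kick in.
 */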
/* Uncork the outqueue and flush any chunks that were queued up while it was
 * corked.
 */
void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
{
	if (q->cork)
		q->cork = 0;

	sctp_outq_flush(q, 0, gfp);
}
static int sctp_packet_singleton(struct sctp_transport *transport,
				 struct sctp_chunk *chunk, gfp_t gfp)
{
	const struct sctp_association *asoc = transport->asoc;
	const __u16 sport = asoc->base.bind_addr.port;
	const __u16 dport = asoc->peer.port;
	const __u32 vtag = asoc->peer.i.init_tag;
	struct sctp_packet singleton;

	sctp_packet_init(&singleton, transport, sport, dport);
	sctp_packet_config(&singleton, vtag, 0);
	sctp_packet_append_chunk(&singleton, chunk);
	return sctp_packet_transmit(&singleton, gfp);
}
/* Struct to hold the context during sctp outq flush */
struct sctp_flush_ctx {
	struct sctp_outq *q;
	/* Current transport being used. It's NOT the same as curr active one */
	struct sctp_transport *transport;
	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct sctp_association *asoc;
	/* Packet on the current transport above */
	struct sctp_packet *packet;
	gfp_t gfp;
};
/* transport: current transport */
static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
				       struct sctp_chunk *chunk)
{
	struct sctp_transport *new_transport = chunk->transport;

	if (!new_transport) {
		if (!sctp_chunk_is_data(chunk)) {
			/* If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,
							&ctx->transport->ipaddr))
				new_transport = ctx->transport;
			else
				new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
								  &chunk->dest);
		}

		/* if we still don't have a new transport, then
		 * use the current active path.
		 */
		if (!new_transport)
			new_transport = ctx->asoc->peer.active_path;
	} else {
		__u8 type;

		switch (new_transport->state) {
		case SCTP_INACTIVE:
		case SCTP_UNCONFIRMED:
		case SCTP_PF:
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			type = chunk->chunk_hdr->type;
			if (type != SCTP_CID_HEARTBEAT &&
			    type != SCTP_CID_HEARTBEAT_ACK &&
			    type != SCTP_CID_ASCONF_ACK)
				new_transport = ctx->asoc->peer.active_path;
			break;
		default:
			break;
		}
	}

	/* Are we switching transports? Take care of transport locks. */
	if (new_transport != ctx->transport) {
		ctx->transport = new_transport;
		ctx->packet = &ctx->transport->packet;

		if (list_empty(&ctx->transport->send_ready))
			list_add_tail(&ctx->transport->send_ready,
				      &ctx->transport_list);

		sctp_packet_config(ctx->packet,
				   ctx->asoc->peer.i.init_tag,
				   ctx->asoc->peer.ecn_capable);
		/* We've switched transports, so apply the
		 * Burst limit to the new transport.
		 */
		sctp_transport_burst_limited(ctx->transport);
	}
}
static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
{
	struct sctp_chunk *chunk, *tmp;
	enum sctp_xmit status;
	int one_packet, error;

	list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) {
		one_packet = 0;

		/* RFC 5061, 5.3
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (ctx->asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. Should always be true for
		 * the first chunk as we don't have a transport by then.
		 */
		sctp_outq_select_transport(ctx, chunk);

		switch (chunk->chunk_hdr->type) {
		/* 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			error = sctp_packet_singleton(ctx->transport, chunk,
						      ctx->gfp);
			if (error < 0) {
				ctx->asoc->base.sk->sk_err = -error;
				return;
			}
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk))
				ctx->packet->vtag = ctx->asoc->c.my_vtag;
			fallthrough;

		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			fallthrough;

		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
		case SCTP_CID_I_FWD_TSN:
		case SCTP_CID_RECONF:
			status = sctp_packet_transmit_chunk(ctx->packet, chunk,
							    one_packet, ctx->gfp);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &ctx->q->control_chunk_list);
				break;
			}

			ctx->asoc->stats.octrlchunks++;
			/* PR-SCTP C5) If a FORWARD TSN is sent, the
			 * sender MUST assure that at least one T3-rtx
			 * timer is running.
			 */
			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
			    chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
				sctp_transport_reset_t3_rtx(ctx->transport);
				ctx->transport->last_time_sent = jiffies;
			}

			if (chunk == ctx->asoc->strreset_chunk)
				sctp_transport_reset_reconf_timer(ctx->transport);

			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}
}
/* Returns false if new data shouldn't be sent */
static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
				int rtx_timeout)
{
	int error, start_timer = 0;

	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
		return false;

	if (ctx->transport != ctx->asoc->peer.retran_path) {
		/* Switch transports & prepare the packet.  */
		ctx->transport = ctx->asoc->peer.retran_path;
		ctx->packet = &ctx->transport->packet;

		if (list_empty(&ctx->transport->send_ready))
			list_add_tail(&ctx->transport->send_ready,
				      &ctx->transport_list);

		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
				   ctx->asoc->peer.ecn_capable);
	}

	error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
				      &start_timer, ctx->gfp);
	if (error < 0)
		ctx->asoc->base.sk->sk_err = -error;

	if (start_timer) {
		sctp_transport_reset_t3_rtx(ctx->transport);
		ctx->transport->last_time_sent = jiffies;
	}

	/* This can happen on COOKIE-ECHO resend.  Only
	 * one chunk can get bundled with a COOKIE-ECHO.
	 */
	if (ctx->packet->has_cookie_echo)
		return false;

	/* Don't send new data if there is still data
	 * waiting to retransmit.
	 */
	if (!list_empty(&ctx->q->retransmit))
		return false;

	return true;
}
static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
				 int rtx_timeout)
{
	struct sctp_chunk *chunk;
	enum sctp_xmit status;

	/* Is it OK to send data chunks?  */
	switch (ctx->asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!ctx->packet || !ctx->packet->has_cookie_echo)
			return;

		fallthrough;
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		break;

	default:
		/* Do nothing. */
		return;
	}

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * C) When the time comes for the sender to transmit,
	 * before sending new DATA chunks, the sender MUST
	 * first transmit any outstanding DATA chunks which
	 * are marked for retransmission (limited by the
	 * current cwnd).
	 */
	if (!list_empty(&ctx->q->retransmit) &&
	    !sctp_outq_flush_rtx(ctx, rtx_timeout))
		return;

	/* Apply Max.Burst limitation to the current transport in
	 * case it will be used for new data.  We are going to
	 * reset it before we return, but we want to apply the limit
	 * to the currently queued data.
	 */
	if (ctx->transport)
		sctp_transport_burst_limited(ctx->transport);

	/* Finally, transmit new packets.  */
	while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
		__u32 sid = ntohs(chunk->subh.data_hdr->stream);
		__u8 stream_state = SCTP_SO(&ctx->asoc->stream, sid)->state;

		/* Has this chunk expired? */
		if (sctp_chunk_abandoned(chunk)) {
			sctp_sched_dequeue_done(ctx->q, chunk);
			sctp_chunk_fail(chunk, 0);
			sctp_chunk_free(chunk);
			continue;
		}

		if (stream_state == SCTP_STREAM_CLOSED) {
			sctp_outq_head_data(ctx->q, chunk);
			break;
		}

		sctp_outq_select_transport(ctx, chunk);

		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n",
			 __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
			 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
			 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
			 refcount_read(&chunk->skb->users) : -1);

		/* Add the chunk to the packet.  */
		status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
						    ctx->gfp);
		if (status != SCTP_XMIT_OK) {
			/* We could not append this chunk, so put
			 * the chunk back on the output queue.
			 */
			pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
				 __func__, ntohl(chunk->subh.data_hdr->tsn),
				 status);

			sctp_outq_head_data(ctx->q, chunk);
			break;
		}

		/* If the sender is in the SHUTDOWN-PENDING state, it MAY set
		 * the I-bit in the DATA chunk header.
		 */
		if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
			chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
			ctx->asoc->stats.ouodchunks++;
		else
			ctx->asoc->stats.oodchunks++;

		/* Only now it's safe to consider this
		 * chunk as sent, sched-wise.
		 */
		sctp_sched_dequeue_done(ctx->q, chunk);

		list_add_tail(&chunk->transmitted_list,
			      &ctx->transport->transmitted);

		sctp_transport_reset_t3_rtx(ctx->transport);
		ctx->transport->last_time_sent = jiffies;

		/* Only let one DATA chunk get bundled with a
		 * COOKIE-ECHO chunk.
		 */
		if (ctx->packet->has_cookie_echo)
			break;
	}
}
static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
{
	struct list_head *ltransport;
	struct sctp_packet *packet;
	struct sctp_transport *t;
	int error = 0;

	while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) {
		t = list_entry(ltransport, struct sctp_transport, send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet)) {
			error = sctp_packet_transmit(packet, ctx->gfp);
			if (error < 0)
				ctx->q->asoc->base.sk->sk_err = -error;
		}

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}
}
/* Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
{
	struct sctp_flush_ctx ctx = {
		.q = q,
		.transport = NULL,
		.transport_list = LIST_HEAD_INIT(ctx.transport_list),
		.asoc = q->asoc,
		.packet = NULL,
		.gfp = gfp,
	};

	/* 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	sctp_outq_flush_ctrl(&ctx);

	if (q->asoc->src_out_of_asoc_ok)
		goto sctp_flush_out;

	sctp_outq_flush_data(&ctx, rtx_timeout);

sctp_flush_out:

	sctp_outq_flush_transports(&ctx);
}
/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	union sctp_sack_variable *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	union sctp_sack_variable *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			if (asoc->peer.prsctp_capable &&
			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
				asoc->sent_cnt_removable--;
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */
	sack_a_rwnd = ntohl(sack->a_rwnd);
	asoc->peer.zero_window_announced = !sack_a_rwnd;
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	asoc->stream.si->generate_ftsn(q, sack_ctsn);

	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
		 asoc->adv_peer_ack_point);

	return sctp_outq_is_empty(q);
}
/* Is the outqueue empty?
 * The queue is empty when we have no pending data, no in-flight data
 * and no pending retransmissions.
 */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
	       list_empty(&q->retransmit);
}
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/
/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;
	bool forward_progress = false;

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (transmitted_queue != &q->retransmit &&
			    !tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport && !tchunk->tsn_gap_acked) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!sctp_chunk_retransmitted(tchunk) &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}

				if (TSN_lte(tsn, sack_ctsn)) {
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
					*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
				forward_progress = true;
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;
				forward_progress = true;

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				list_add_tail(lchunk, &tlist);
			}
		} else {
			if (tchunk->tsn_gap_acked) {
				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
					 __func__, tsn);

				tchunk->tsn_gap_acked = 0;

				if (tchunk->transport)
					bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);
		}
	}

	if (transport) {
		if (bytes_acked) {
			struct sctp_association *asoc = transport->asoc;

			/* We may have counted DATA that was migrated
			 * to this transport due to DEL-IP operation.
			 * Subtract those bytes, since they were never
			 * sent on this transport and shouldn't be
			 * credited to this transport.
			 */
			bytes_acked -= migrate_bytes;

			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;
			forward_progress = true;

			/*
			 * While in SHUTDOWN PENDING, we may have started
			 * the T5 shutdown guard timer after reaching the
			 * retransmission limit. Stop that timer as soon
			 * as the receiver acknowledged any data.
			 */
			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
			    del_timer(&asoc->timers
				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
					sctp_association_put(asoc);

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if ((transport->state == SCTP_INACTIVE ||
			     transport->state == SCTP_UNCONFIRMED) &&
			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			if (transport->flight_size == 0)
				transport->partial_bytes_acked = 0;
			q->outstanding_bytes -= bytes_acked + migrate_bytes;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 *
			 * Allow the association to timeout while in SHUTDOWN
			 * PENDING or SHUTDOWN RECEIVED in case the receiver
			 * stays in zero window mode forever.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn+2 == q->asoc->next_tsn) &&
			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
				pr_debug("%s: sack received for zero window "
					 "probe:%u\n", __func__, sack_ctsn);

				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (del_timer(&transport->T3_rtx_timer))
				sctp_transport_put(transport);
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}

		if (forward_progress) {
			if (transport->dst)
				sctp_transport_dst_confirm(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}
/* Mark chunks as missing so that they may subsequently be retransmitted. */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *primary = asoc->peer.primary_path;

	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary,
						chunk->transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				pr_debug("%s: tsn:0x%x missing counter:%d\n",
					 __func__, tsn, chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report'
		 * value larger than or equal to 3, mark that chunk for
		 * retransmission and start the fast retransmit procedure.
		 */

		if (chunk->tsn_missing_report >= 3) {
			chunk->fast_retransmit = SCTP_NEED_FRTX;
			do_fast_retransmit = 1;
		}
	}

	if (do_fast_retransmit)
		sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

	pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
		 "flight_size:%d, pba:%d\n",  __func__, transport,
		 transport->cwnd, transport->ssthresh,
		 transport->flight_size, transport->partial_bytes_acked);
}
/* Is the given TSN acked by this packet? */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	__u32 ctsn = ntohl(sack->cum_tsn_ack);
	union sctp_sack_variable *frags;
	__u16 tsn_offset, blocks;
	int i;

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgment (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks. They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field. All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	blocks = ntohs(sack->num_gap_ack_blocks);
	tsn_offset = tsn - ctsn;
	for (i = 0; i < blocks; ++i) {
		if (tsn_offset >= ntohs(frags[i].gab.start) &&
		    tsn_offset <= ntohs(frags[i].gab.end))
			goto pass;
	}

	return 0;
pass:
	return 1;
}
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __be16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	if (!asoc->peer.prsctp_capable)
		return;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing           local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}