/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (that is, if I have
 * data to send and will be bundling the sack with it).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
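
/*
 * A minimal standalone sketch of the window arithmetic above, assuming
 * plain scalar stand-ins for the socket/association state; here
 * sctp_sbspace_sub() is modeled as a subtraction that clamps at zero.
 * Illustrative only, not compiled into the stack.
 */
#if 0
static uint32_t
rwnd_arith_sketch(uint32_t sb_space, uint32_t reasm_bytes,
    uint32_t stream_bytes, uint32_t ctrl_len)
{
	uint32_t calc = sb_space;

	/* bytes still held on the reassembly and per-stream queues */
	calc = (calc > reasm_bytes) ? (calc - reasm_bytes) : 0;
	calc = (calc > stream_bytes) ? (calc - stream_bytes) : 0;
	/* overhead of the queued control information */
	calc = (calc > ctrl_len) ? (calc - ctrl_len) : 0;
	/* SWS avoidance: advertise at least 1 once ctrl-stuff dominates */
	if (calc < ctrl_len)
		calc = 1;
	return (calc);
}
#endif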
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
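
/*
 * A rough sketch of the control-message layout built above, using only the
 * standard <sys/socket.h> CMSG_* macros: one cmsghdr immediately followed
 * by the sndrcvinfo payload. Illustrative only; the hypothetical buffer is
 * assumed to hold at least CMSG_LEN(sizeof(struct sctp_sndrcvinfo)) bytes.
 */
#if 0
static void
cmsg_layout_sketch(char *buf, struct sctp_sndrcvinfo *sinfo)
{
	struct cmsghdr *cmh = (struct cmsghdr *)buf;

	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	/* CMSG_DATA() points just past the aligned header */
	memcpy(CMSG_DATA(cmh), sinfo, sizeof(struct sctp_sndrcvinfo));
}
#endif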
char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	}
	*control_len = len;
	return (buf);
}
/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space. 2) run out of sequential
 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *ctlat;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since its in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is a EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was a EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
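
/*
 * A condensed sketch of the stop conditions the delivery loop above
 * evaluates, with hypothetical scalars standing in for the queue state:
 * delivery continues only while TSNs stay sequential and the chunk is the
 * next in stream order (or unordered); SCTP_DATA_LAST_FRAG ends the
 * message. Illustrative only.
 */
#if 0
static int
can_deliver_next_sketch(uint32_t chunk_tsn, uint32_t last_delivered_tsn,
    uint16_t chunk_ssn, uint16_t next_ssn_todel, int unordered)
{
	if (chunk_tsn != (last_delivered_tsn + 1))
		return (0);	/* TSN gap: no more sequential TSN's */
	if (!unordered && (chunk_ssn != next_ssn_todel))
		return (0);	/* not the next sequence in its stream */
	return (1);
}
#endif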
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, He sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */
					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
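
/*
 * The SSN ordering above leans on compare_with_wrap(); a plausible
 * standalone model of that serial-number comparison for 16-bit sequence
 * numbers (MAX_SEQ == 0xffff) is sketched here: "a is newer than b" when
 * they differ and the forward distance from b to a is under half the
 * sequence space. Illustrative only.
 */
#if 0
static int
ssn_newer_sketch(uint16_t a, uint16_t b)
{
	return ((a != b) && ((uint16_t)(a - b) < 0x8000));
}
#endif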
/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete
 */
int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and its ok to deliver
			 * but should we?
			 */
			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it wont know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our Fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
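
/*
 * A small sketch of the partial-delivery trigger evaluated above: start a
 * PD-API session once the whole leading message is queued, or once the
 * queued prefix reaches the endpoint's partial_delivery_point.
 * Illustrative only.
 */
#if 0
static int
should_start_pdapi_sketch(int all_fragments_present, uint32_t queued_bytes,
    uint32_t partial_delivery_point)
{
	return (all_fragments_present ||
	    (queued_bytes >= partial_delivery_point));
}
#endif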
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
					    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, He sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
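
/*
 * Summary of the fragment-flag adjacency rules audited above for chunks
 * with consecutive TSNs:
 *
 *	prev is FIRST or MIDDLE -> new chunk must be MIDDLE or LAST, on the
 *	    same stream number and (if ordered) the same stream sequence.
 *	prev is LAST            -> new chunk must be a FIRST.
 *	next is FIRST           -> new chunk must be a LAST.
 *	next is MIDDLE or LAST  -> new chunk must be FIRST or MIDDLE, on the
 *	    same stream number and (if ordered) the same stream sequence.
 *
 * Any other combination is treated as a protocol violation and aborts the
 * association with SCTP_CAUSE_PROTOCOL_VIOLATION.
 */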
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST
					 * and the new chunk is a fully
					 * self- contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it.
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk; */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
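	/*
	 * Worked example of the wrap case: with mapping_array_base_tsn =
	 * 0xfffffffe and tsn = 1, straight subtraction would underflow, so
	 * gap = (MAX_TSN - 0xfffffffe) + 1 + 1 = 3; tsn lands three slots
	 * past the base once the 32-bit TSN space wraps.
	 */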
1439 if (gap
>= (SCTP_MAPPING_ARRAY
<< 3)) {
1440 /* Can't hold the bit in the mapping at max array, toss it */
1443 if (gap
>= (uint32_t) (asoc
->mapping_array_size
<< 3)) {
1444 SCTP_TCB_LOCK_ASSERT(stcb
);
1445 if (sctp_expand_mapping_array(asoc
, gap
)) {
1446 /* Can't expand, drop it */
1450 if (compare_with_wrap(tsn
, *high_tsn
, MAX_TSN
)) {
1453 /* See if we have received this one already */
1454 if (SCTP_IS_TSN_PRESENT(asoc
->mapping_array
, gap
)) {
1455 SCTP_STAT_INCR(sctps_recvdupdata
);
1456 if (asoc
->numduptsns
< SCTP_MAX_DUP_TSNS
) {
1457 /* Record a dup for the next outbound sack */
1458 asoc
->dup_tsns
[asoc
->numduptsns
] = tsn
;
1461 asoc
->send_sack
= 1;
1465 * Check to see about the GONE flag, duplicates would cause a sack
1466 * to be sent up above
1468 if (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
1469 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_ALLGONE
) ||
1470 (stcb
->asoc
.state
& SCTP_STATE_CLOSED_SOCKET
))
1473 * wait a minute, this guy is gone, there is no longer a
1474 * receiver. Send peer an ABORT!
1476 struct mbuf
*op_err
;
1478 op_err
= sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC
);
1479 sctp_abort_an_association(stcb
->sctp_ep
, stcb
, 0, op_err
, SCTP_SO_NOT_LOCKED
);
1484 * Now before going further we see if there is room. If NOT then we
1485 * MAY let one through only IF this TSN is the one we are waiting
1486 * for on a partial delivery API.
1489 /* now do the tests */
1490 if (((asoc
->cnt_on_all_streams
+
1491 asoc
->cnt_on_reasm_queue
+
1492 asoc
->cnt_msg_on_sb
) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue
)) ||
1493 (((int)asoc
->my_rwnd
) <= 0)) {
1495 * When we have NO room in the rwnd we check to make sure
1496 * the reader is doing its job...
1498 if (stcb
->sctp_socket
->so_rcv
.sb_cc
) {
1499 /* some to read, wake-up */
1500 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1503 so
= SCTP_INP_SO(stcb
->sctp_ep
);
1504 atomic_add_int(&stcb
->asoc
.refcnt
, 1);
1505 SCTP_TCB_UNLOCK(stcb
);
1506 SCTP_SOCKET_LOCK(so
, 1);
1507 SCTP_TCB_LOCK(stcb
);
1508 atomic_subtract_int(&stcb
->asoc
.refcnt
, 1);
1509 if (stcb
->asoc
.state
& SCTP_STATE_CLOSED_SOCKET
) {
1510 /* assoc was freed while we were unlocked */
1511 SCTP_SOCKET_UNLOCK(so
, 1);
1515 sctp_sorwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
1516 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1517 SCTP_SOCKET_UNLOCK(so
, 1);
1520 /* now is it in the mapping array of what we have accepted? */
1521 if (compare_with_wrap(tsn
, asoc
->highest_tsn_inside_map
, MAX_TSN
)) {
1522 /* Nope not in the valid range dump it */
1523 sctp_set_rwnd(stcb
, asoc
);
1524 if ((asoc
->cnt_on_all_streams
+
1525 asoc
->cnt_on_reasm_queue
+
1526 asoc
->cnt_msg_on_sb
) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue
)) {
1527 SCTP_STAT_INCR(sctps_datadropchklmt
);
1529 SCTP_STAT_INCR(sctps_datadroprwnd
);
1536 strmno
= ntohs(ch
->dp
.stream_id
);
1537 if (strmno
>= asoc
->streamincnt
) {
1538 struct sctp_paramhdr
*phdr
;
1541 mb
= sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr
) * 2),
1542 0, M_DONTWAIT
, 1, MT_DATA
);
1544 /* add some space up front so prepend will work well */
1545 SCTP_BUF_RESV_UF(mb
, sizeof(struct sctp_chunkhdr
));
1546 phdr
= mtod(mb
, struct sctp_paramhdr
*);
1548 * Error causes are just param's and this one has
1549 * two back to back phdr, one with the error type
1550 * and size, the other with the streamid and a rsvd
1552 SCTP_BUF_LEN(mb
) = (sizeof(struct sctp_paramhdr
) * 2);
1553 phdr
->param_type
= htons(SCTP_CAUSE_INVALID_STREAM
);
1554 phdr
->param_length
=
1555 htons(sizeof(struct sctp_paramhdr
) * 2);
1557 /* We insert the stream in the type field */
1558 phdr
->param_type
= ch
->dp
.stream_id
;
1559 /* And set the length to 0 for the rsvd field */
1560 phdr
->param_length
= 0;
1561 sctp_queue_op_err(stcb
, mb
);
1563 SCTP_STAT_INCR(sctps_badsid
);
1564 SCTP_TCB_LOCK_ASSERT(stcb
);
1565 SCTP_SET_TSN_PRESENT(asoc
->mapping_array
, gap
);
1566 if (compare_with_wrap(tsn
, asoc
->highest_tsn_inside_map
, MAX_TSN
)) {
1567 /* we have a new high score */
1568 asoc
->highest_tsn_inside_map
= tsn
;
1569 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_MAP_LOGGING_ENABLE
) {
1570 sctp_log_map(0, 2, asoc
->highest_tsn_inside_map
, SCTP_MAP_SLIDE_RESULT
);
1573 if (tsn
== (asoc
->cumulative_tsn
+ 1)) {
1574 /* Update cum-ack */
1575 asoc
->cumulative_tsn
= tsn
;
1580 * Before we continue lets validate that we are not being fooled by
1581 * an evil attacker. We can only have 4k chunks based on our TSN
1582 * spread allowed by the mapping array 512 * 8 bits, so there is no
1583 * way our stream sequence numbers could have wrapped. We of course
1584 * only validate the FIRST fragment so the bit must be set.
1586 strmseq
= ntohs(ch
->dp
.stream_sequence
);
1587 #ifdef SCTP_ASOCLOG_OF_TSNS
1588 SCTP_TCB_LOCK_ASSERT(stcb
);
1589 if (asoc
->tsn_in_at
>= SCTP_TSN_LOG_SIZE
) {
1590 asoc
->tsn_in_at
= 0;
1591 asoc
->tsn_in_wrapped
= 1;
1593 asoc
->in_tsnlog
[asoc
->tsn_in_at
].tsn
= tsn
;
1594 asoc
->in_tsnlog
[asoc
->tsn_in_at
].strm
= strmno
;
1595 asoc
->in_tsnlog
[asoc
->tsn_in_at
].seq
= strmseq
;
1596 asoc
->in_tsnlog
[asoc
->tsn_in_at
].sz
= chk_length
;
1597 asoc
->in_tsnlog
[asoc
->tsn_in_at
].flgs
= chunk_flags
;
1598 asoc
->in_tsnlog
[asoc
->tsn_in_at
].stcb
= (void *)stcb
;
1599 asoc
->in_tsnlog
[asoc
->tsn_in_at
].in_pos
= asoc
->tsn_in_at
;
1600 asoc
->in_tsnlog
[asoc
->tsn_in_at
].in_out
= 1;
1603 if ((chunk_flags
& SCTP_DATA_FIRST_FRAG
) &&
1604 (TAILQ_EMPTY(&asoc
->resetHead
)) &&
1605 (chunk_flags
& SCTP_DATA_UNORDERED
) == 0 &&
1606 (compare_with_wrap(asoc
->strmin
[strmno
].last_sequence_delivered
,
1607 strmseq
, MAX_SEQ
) ||
1608 asoc
->strmin
[strmno
].last_sequence_delivered
== strmseq
)) {
1609 /* The incoming sseq is behind where we last delivered? */
1610 SCTPDBG(SCTP_DEBUG_INDATA1
, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1611 strmseq
, asoc
->strmin
[strmno
].last_sequence_delivered
);
1612 oper
= sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr
) + 3 * sizeof(uint32_t)),
1613 0, M_DONTWAIT
, 1, MT_DATA
);
1615 struct sctp_paramhdr
*ph
;
1618 SCTP_BUF_LEN(oper
) = sizeof(struct sctp_paramhdr
) +
1619 (3 * sizeof(uint32_t));
1620 ph
= mtod(oper
, struct sctp_paramhdr
*);
1621 ph
->param_type
= htons(SCTP_CAUSE_PROTOCOL_VIOLATION
);
1622 ph
->param_length
= htons(SCTP_BUF_LEN(oper
));
1623 ippp
= (uint32_t *) (ph
+ 1);
1624 *ippp
= htonl(SCTP_FROM_SCTP_INDATA
+ SCTP_LOC_14
);
1628 *ippp
= ((strmno
<< 16) | strmseq
);
1631 stcb
->sctp_ep
->last_abort_code
= SCTP_FROM_SCTP_INDATA
+ SCTP_LOC_14
;
1632 sctp_abort_an_association(stcb
->sctp_ep
, stcb
,
1633 SCTP_PEER_FAULTY
, oper
, SCTP_SO_NOT_LOCKED
);
1637 /************************************
1638 * From here down we may find ch-> invalid
1639 * so its a good idea NOT to use it.
1640 *************************************/
1642 the_len
= (chk_length
- sizeof(struct sctp_data_chunk
));
1643 if (last_chunk
== 0) {
1644 dmbuf
= SCTP_M_COPYM(*m
,
1645 (offset
+ sizeof(struct sctp_data_chunk
)),
1646 the_len
, M_DONTWAIT
);
1647 #ifdef SCTP_MBUF_LOGGING
1648 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_MBUF_LOGGING_ENABLE
) {
1653 if (SCTP_BUF_IS_EXTENDED(mat
)) {
1654 sctp_log_mb(mat
, SCTP_MBUF_ICOPY
);
1656 mat
= SCTP_BUF_NEXT(mat
);
1661 /* We can steal the last chunk */
1665 /* lop off the top part */
1666 m_adj(dmbuf
, (offset
+ sizeof(struct sctp_data_chunk
)));
1667 if (SCTP_BUF_NEXT(dmbuf
) == NULL
) {
1668 l_len
= SCTP_BUF_LEN(dmbuf
);
1671 * need to count up the size hopefully does not hit
1679 l_len
+= SCTP_BUF_LEN(lat
);
1680 lat
= SCTP_BUF_NEXT(lat
);
1683 if (l_len
> the_len
) {
1684 /* Trim the end round bytes off too */
1685 m_adj(dmbuf
, -(l_len
- the_len
));
1688 if (dmbuf
== NULL
) {
1689 SCTP_STAT_INCR(sctps_nomem
);
1692 if ((chunk_flags
& SCTP_DATA_NOT_FRAG
) == SCTP_DATA_NOT_FRAG
&&
1693 asoc
->fragmented_delivery_inprogress
== 0 &&
1694 TAILQ_EMPTY(&asoc
->resetHead
) &&
1696 ((asoc
->strmin
[strmno
].last_sequence_delivered
+ 1) == strmseq
&&
1697 TAILQ_EMPTY(&asoc
->strmin
[strmno
].inqueue
)))) {
1698 /* Candidate for express delivery */
1700 * Its not fragmented, No PD-API is up, Nothing in the
1701 * delivery queue, Its un-ordered OR ordered and the next to
1702 * deliver AND nothing else is stuck on the stream queue,
1703 * And there is room for it in the socket buffer. Lets just
1704 * stuff it up the buffer....
1707 /* It would be nice to avoid this copy if we could :< */
1708 sctp_alloc_a_readq(stcb
, control
);
1709 sctp_build_readq_entry_mac(control
, stcb
, asoc
->context
, net
, tsn
,
1715 if (control
== NULL
) {
1716 goto failed_express_del
;
1718 sctp_add_to_readq(stcb
->sctp_ep
, stcb
, control
, &stcb
->sctp_socket
->so_rcv
, 1, SCTP_SO_NOT_LOCKED
);
1719 if ((chunk_flags
& SCTP_DATA_UNORDERED
) == 0) {
1720 /* for ordered, bump what we delivered */
1721 asoc
->strmin
[strmno
].last_sequence_delivered
++;
1723 SCTP_STAT_INCR(sctps_recvexpress
);
1724 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_STR_LOGGING_ENABLE
) {
1725 sctp_log_strm_del_alt(stcb
, tsn
, strmseq
, strmno
,
1726 SCTP_STR_LOG_FROM_EXPRS_DEL
);
1729 goto finish_express_del
;
1732 /* If we reach here this is a new chunk */
1735 /* Express for fragmented delivery? */
1736 if ((asoc
->fragmented_delivery_inprogress
) &&
1737 (stcb
->asoc
.control_pdapi
) &&
1738 (asoc
->str_of_pdapi
== strmno
) &&
1739 (asoc
->ssn_of_pdapi
== strmseq
)
1741 control
= stcb
->asoc
.control_pdapi
;
1742 if ((chunk_flags
& SCTP_DATA_FIRST_FRAG
) == SCTP_DATA_FIRST_FRAG
) {
1743 /* Can't be another first? */
1744 goto failed_pdapi_express_del
;
1746 if (tsn
== (control
->sinfo_tsn
+ 1)) {
1747 /* Yep, we can add it on */
1751 if (chunk_flags
& SCTP_DATA_LAST_FRAG
) {
1754 cumack
= asoc
->cumulative_tsn
;
1755 if ((cumack
+ 1) == tsn
)
1758 if (sctp_append_to_readq(stcb
->sctp_ep
, stcb
, control
, dmbuf
, end
,
1760 &stcb
->sctp_socket
->so_rcv
)) {
1761 SCTP_PRINTF("Append fails end:%d\n", end
);
1762 goto failed_pdapi_express_del
;
1764 SCTP_STAT_INCR(sctps_recvexpressm
);
1765 control
->sinfo_tsn
= tsn
;
1766 asoc
->tsn_last_delivered
= tsn
;
1767 asoc
->fragment_flags
= chunk_flags
;
1768 asoc
->tsn_of_pdapi_last_delivered
= tsn
;
1769 asoc
->last_flags_delivered
= chunk_flags
;
1770 asoc
->last_strm_seq_delivered
= strmseq
;
1771 asoc
->last_strm_no_delivered
= strmno
;
1773 /* clean up the flags and such */
1774 asoc
->fragmented_delivery_inprogress
= 0;
1775 if ((chunk_flags
& SCTP_DATA_UNORDERED
) == 0) {
1776 asoc
->strmin
[strmno
].last_sequence_delivered
++;
1778 stcb
->asoc
.control_pdapi
= NULL
;
1779 if (TAILQ_EMPTY(&asoc
->reasmqueue
) == 0) {
1781 * There could be another message
1784 need_reasm_check
= 1;
1788 goto finish_express_del
;
1791 failed_pdapi_express_del
:
1793 if ((chunk_flags
& SCTP_DATA_NOT_FRAG
) != SCTP_DATA_NOT_FRAG
) {
1794 sctp_alloc_a_chunk(stcb
, chk
);
1796 /* No memory so we drop the chunk */
1797 SCTP_STAT_INCR(sctps_nomem
);
1798 if (last_chunk
== 0) {
1799 /* we copied it, free the copy */
1800 sctp_m_freem(dmbuf
);
1804 chk
->rec
.data
.TSN_seq
= tsn
;
1805 chk
->no_fr_allowed
= 0;
1806 chk
->rec
.data
.stream_seq
= strmseq
;
1807 chk
->rec
.data
.stream_number
= strmno
;
1808 chk
->rec
.data
.payloadtype
= protocol_id
;
1809 chk
->rec
.data
.context
= stcb
->asoc
.context
;
1810 chk
->rec
.data
.doing_fast_retransmit
= 0;
1811 chk
->rec
.data
.rcv_flags
= chunk_flags
;
1813 chk
->send_size
= the_len
;
1815 atomic_add_int(&net
->ref_count
, 1);
1818 sctp_alloc_a_readq(stcb
, control
);
1819 sctp_build_readq_entry_mac(control
, stcb
, asoc
->context
, net
, tsn
,
1825 if (control
== NULL
) {
1826 /* No memory so we drop the chunk */
1827 SCTP_STAT_INCR(sctps_nomem
);
1828 if (last_chunk
== 0) {
1829 /* we copied it, free the copy */
1830 sctp_m_freem(dmbuf
);
1834 control
->length
= the_len
;
1837 /* Mark it as received */
1838 /* Now queue it where it belongs */
1839 if (control
!= NULL
) {
1840 /* First a sanity check */
1841 if (asoc
->fragmented_delivery_inprogress
) {
1843 * Ok, we have a fragmented delivery in progress if
1844 * this chunk is next to deliver OR belongs in our
1845 * view to the reassembly, the peer is evil or
1848 uint32_t estimate_tsn
;
1850 estimate_tsn
= asoc
->tsn_last_delivered
+ 1;
1851 if (TAILQ_EMPTY(&asoc
->reasmqueue
) &&
1852 (estimate_tsn
== control
->sinfo_tsn
)) {
1853 /* Evil/Broke peer */
1854 sctp_m_freem(control
->data
);
1855 control
->data
= NULL
;
1856 if (control
->whoFrom
) {
1857 sctp_free_remote_addr(control
->whoFrom
);
1858 control
->whoFrom
= NULL
;
1860 sctp_free_a_readq(stcb
, control
);
1861 oper
= sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr
) + 3 * sizeof(uint32_t)),
1862 0, M_DONTWAIT
, 1, MT_DATA
);
1864 struct sctp_paramhdr
*ph
;
1867 SCTP_BUF_LEN(oper
) =
1868 sizeof(struct sctp_paramhdr
) +
1869 (3 * sizeof(uint32_t));
1870 ph
= mtod(oper
, struct sctp_paramhdr
*);
1872 htons(SCTP_CAUSE_PROTOCOL_VIOLATION
);
1873 ph
->param_length
= htons(SCTP_BUF_LEN(oper
));
1874 ippp
= (uint32_t *) (ph
+ 1);
1875 *ippp
= htonl(SCTP_FROM_SCTP_INDATA
+ SCTP_LOC_15
);
1879 *ippp
= ((strmno
<< 16) | strmseq
);
1881 stcb
->sctp_ep
->last_abort_code
= SCTP_FROM_SCTP_INDATA
+ SCTP_LOC_15
;
1882 sctp_abort_an_association(stcb
->sctp_ep
, stcb
,
1883 SCTP_PEER_FAULTY
, oper
, SCTP_SO_NOT_LOCKED
);
1888 if (sctp_does_tsn_belong_to_reasm(asoc
, control
->sinfo_tsn
)) {
1889 sctp_m_freem(control
->data
);
1890 control
->data
= NULL
;
1891 if (control
->whoFrom
) {
1892 sctp_free_remote_addr(control
->whoFrom
);
1893 control
->whoFrom
= NULL
;
1895 sctp_free_a_readq(stcb
, control
);
1897 oper
= sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr
) + 3 * sizeof(uint32_t)),
1898 0, M_DONTWAIT
, 1, MT_DATA
);
1900 struct sctp_paramhdr
*ph
;
1903 SCTP_BUF_LEN(oper
) =
1904 sizeof(struct sctp_paramhdr
) +
1905 (3 * sizeof(uint32_t));
1907 struct sctp_paramhdr
*);
1909 htons(SCTP_CAUSE_PROTOCOL_VIOLATION
);
1911 htons(SCTP_BUF_LEN(oper
));
1912 ippp
= (uint32_t *) (ph
+ 1);
1913 *ippp
= htonl(SCTP_FROM_SCTP_INDATA
+ SCTP_LOC_16
);
1917 *ippp
= ((strmno
<< 16) | strmseq
);
1919 stcb
->sctp_ep
->last_abort_code
= SCTP_FROM_SCTP_INDATA
+ SCTP_LOC_16
;
1920 sctp_abort_an_association(stcb
->sctp_ep
,
1921 stcb
, SCTP_PEER_FAULTY
, oper
, SCTP_SO_NOT_LOCKED
);
1928 /* No PDAPI running */
1929 if (!TAILQ_EMPTY(&asoc
->reasmqueue
)) {
1931 * Reassembly queue is NOT empty validate
1932 * that this tsn does not need to be in
1933 * reasembly queue. If it does then our peer
1934 * is broken or evil.
1936 if (sctp_does_tsn_belong_to_reasm(asoc
, control
->sinfo_tsn
)) {
1937 sctp_m_freem(control
->data
);
1938 control
->data
= NULL
;
1939 if (control
->whoFrom
) {
1940 sctp_free_remote_addr(control
->whoFrom
);
1941 control
->whoFrom
= NULL
;
1943 sctp_free_a_readq(stcb
, control
);
1944 oper
= sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr
) + 3 * sizeof(uint32_t)),
1945 0, M_DONTWAIT
, 1, MT_DATA
);
1947 struct sctp_paramhdr
*ph
;
1950 SCTP_BUF_LEN(oper
) =
1951 sizeof(struct sctp_paramhdr
) +
1952 (3 * sizeof(uint32_t));
1954 struct sctp_paramhdr
*);
1956 htons(SCTP_CAUSE_PROTOCOL_VIOLATION
);
1958 htons(SCTP_BUF_LEN(oper
));
1959 ippp
= (uint32_t *) (ph
+ 1);
1960 *ippp
= htonl(SCTP_FROM_SCTP_INDATA
+ SCTP_LOC_17
);
1964 *ippp
= ((strmno
<< 16) | strmseq
);
1966 stcb
->sctp_ep
->last_abort_code
= SCTP_FROM_SCTP_INDATA
+ SCTP_LOC_17
;
1967 sctp_abort_an_association(stcb
->sctp_ep
,
1968 stcb
, SCTP_PEER_FAULTY
, oper
, SCTP_SO_NOT_LOCKED
);
        /* ok, if we reach here we have passed the sanity checks */
        if (chunk_flags & SCTP_DATA_UNORDERED) {
            /* queue directly into socket buffer */
            sctp_add_to_readq(stcb->sctp_ep, stcb,
                control,
                &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
        } else {
            /*
             * Special check for when streams are resetting. We
             * could be more smart about this and check the
             * actual stream to see if it is not being reset..
             * that way we would not create a HOLB when amongst
             * streams being reset and those not being reset.
             *
             * We take complete messages that have a stream reset
             * intervening (aka the TSN is after where our
             * cum-ack needs to be) off and put them on a
             * pending_reply_queue. The reassembly ones we do
             * not have to worry about since they are all sorted
             * and processed by TSN order. It is only the
             * singletons I must worry about.
             */
            if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
                ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
                ) {
                /*
                 * yep its past where we need to reset... go
                 * ahead and queue it.
                 */
                if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
                    /* first one on */
                    TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
                } else {
                    struct sctp_queued_to_read *ctlOn;
                    unsigned char inserted = 0;

                    ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
                    while (ctlOn) {
                        if (compare_with_wrap(control->sinfo_tsn,
                            ctlOn->sinfo_tsn, MAX_TSN)) {
                            ctlOn = TAILQ_NEXT(ctlOn, next);
                        } else {
                            /* found our spot */
                            TAILQ_INSERT_BEFORE(ctlOn, control, next);
                            inserted = 1;
                            break;
                        }
                    }
                    if (inserted == 0) {
                        /*
                         * must be put at end, use
                         * prevP (all setup from
                         * loop) to setup nextP.
                         */
                        TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
                    }
                }
            } else {
                sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
                if (*abort_flag) {
                    return (0);
                }
            }
        }
    } else {
        /* Into the re-assembly queue */
        sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
        if (*abort_flag) {
            /*
             * the assoc is now gone and chk was put onto the
             * reasm queue, which has all been freed.
             */
            *mm = NULL;
            return (0);
        }
    }
finish_express_del:
    if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
        /* we have a new high score */
        asoc->highest_tsn_inside_map = tsn;
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
            sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
        }
    }
    if (tsn == (asoc->cumulative_tsn + 1)) {
        /* Update cum-ack */
        asoc->cumulative_tsn = tsn;
    }
    if (last_chunk) {
        *mm = NULL;
    }
    if (ordered) {
        SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
    } else {
        SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
    }
    SCTP_STAT_INCR(sctps_recvdata);
    /* Set it present please */
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
            asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
    }
    SCTP_TCB_LOCK_ASSERT(stcb);
    SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
    /* check the special flag for stream resets */
    if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
        ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
        (asoc->cumulative_tsn == liste->tsn))
        ) {
        /*
         * we have finished working through the backlogged TSN's now
         * time to reset streams. 1: call reset function. 2: free
         * pending_reply space 3: distribute any chunks in
         * pending_reply_queue.
         */
        struct sctp_queued_to_read *ctl;

        sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
        TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
        SCTP_FREE(liste, SCTP_M_STRESET);
        /* sa_ignore FREED_MEMORY */
        liste = TAILQ_FIRST(&asoc->resetHead);
        ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
        if (ctl && (liste == NULL)) {
            /* All can be removed */
            while (ctl) {
                TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
                sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
                if (*abort_flag) {
                    return (0);
                }
                ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
            }
        } else if (ctl) {
            /* more than one in queue */
            while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
                /*
                 * if ctl->sinfo_tsn is <= liste->tsn we can
                 * process it which is the NOT of
                 * ctl->sinfo_tsn > liste->tsn
                 */
                TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
                sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
                if (*abort_flag) {
                    return (0);
                }
                ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
            }
        }
        /*
         * Now service re-assembly to pick up anything that has been
         * held on reassembly queue?
         */
        sctp_deliver_reasm_check(stcb, asoc);
        need_reasm_check = 0;
    }
    if (need_reasm_check) {
        /* Another one waits ? */
        sctp_deliver_reasm_check(stcb, asoc);
    }
    return (1);
}
int8_t sctp_map_lookup_tab[256] = {
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 3,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 4,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 3,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 5,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 3,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 4,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 3,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 6,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 3,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 4,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 3,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 5,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 3,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 4,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 3,
    -1, 0, -1, 1, -1, 0, -1, 2,
    -1, 0, -1, 1, -1, 0, -1, 7,
};
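/*
 * Editor's sketch (not part of the original file): sctp_map_lookup_tab[b]
 * encodes, for a mapping-array byte b, the number of consecutive low-order
 * 1-bits minus one, i.e. the highest bit index i such that bits 0..i are
 * all set, or -1 when bit 0 is clear:
 *
 *    b = 0x00 -> -1   (the TSN at the base of this byte is missing)
 *    b = 0x01 ->  0   (exactly bit 0 set)
 *    b = 0x03 ->  1   (bits 0-1 set)
 *    b = 0xff ->  7   (whole byte set; the scan continues in the next byte)
 *
 * sctp_sack_check() below adds the looked-up value to a running bit count
 * and then adds one more ("at is one off") to turn it into the number of
 * consecutively received TSNs above mapping_array_base_tsn.
 */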
void
sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
{
    /*
     * Now we also need to check the mapping array in a couple of ways.
     * 1) Did we move the cum-ack point?
     */
    struct sctp_association *asoc;
    int at;
    int last_all_ones = 0;
    int slide_from, slide_end, lgap, distance;
    uint32_t old_cumack, old_base, old_highest;
    unsigned char aux_array[64];

    asoc = &stcb->asoc;
    at = 0;

    old_cumack = asoc->cumulative_tsn;
    old_base = asoc->mapping_array_base_tsn;
    old_highest = asoc->highest_tsn_inside_map;
    if (asoc->mapping_array_size < 64)
        memcpy(aux_array, asoc->mapping_array,
            asoc->mapping_array_size);
    else
        memcpy(aux_array, asoc->mapping_array, 64);

    /*
     * We could probably improve this a small bit by calculating the
     * offset of the current cum-ack as the starting point.
     */
    for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
        if (asoc->mapping_array[slide_from] == 0xff) {
            at += 8;
            last_all_ones = 1;
        } else {
            /* there is a 0 bit */
            at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
            last_all_ones = 0;
            break;
        }
    }
    asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
    /* at is one off, since in the table an embedded -1 is present */
    at++;

    if (compare_with_wrap(asoc->cumulative_tsn,
        asoc->highest_tsn_inside_map,
        MAX_TSN)) {
#ifdef INVARIANTS
        panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
            asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
        SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
            asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
            sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
        }
        asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
#endif
    }
    if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
        /* The complete array was completed by a single FR */
        /* highest becomes the cum-ack */
        int clr;

        asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
        /* clear the array */
        clr = (at >> 3) + 1;
        if (clr > asoc->mapping_array_size) {
            clr = asoc->mapping_array_size;
        }
        memset(asoc->mapping_array, 0, clr);
        /* base becomes one ahead of the cum-ack */
        asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
            sctp_log_map(old_base, old_cumack, old_highest,
                SCTP_MAP_PREPARE_SLIDE);
            sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
                asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
        }
    } else if (at >= 8) {
        /* we can slide the mapping array down */
        /* slide_from holds where we hit the first NON 0xff byte */

        /*
         * now calculate the ceiling of the move using our highest
         * TSN value
         */
        if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
            lgap = asoc->highest_tsn_inside_map -
                asoc->mapping_array_base_tsn;
        } else {
            lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
                asoc->highest_tsn_inside_map + 1;
        }
        slide_end = lgap >> 3;
        if (slide_end < slide_from) {
#ifdef INVARIANTS
            panic("impossible slide");
#else
            printf("impossible slide?\n");
            return;
#endif
        }
        if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
            panic("would overrun buffer");
#else
            printf("Gak, would have overrun map end:%d slide_end:%d\n",
                asoc->mapping_array_size, slide_end);
            slide_end = asoc->mapping_array_size;
#endif
        }
        distance = (slide_end - slide_from) + 1;
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
            sctp_log_map(old_base, old_cumack, old_highest,
                SCTP_MAP_PREPARE_SLIDE);
            sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
                (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
        }
        if (distance + slide_from > asoc->mapping_array_size ||
            distance < 0) {
            /*
             * Here we do NOT slide forward the array so that
             * hopefully when more data comes in to fill it up
             * we will be able to slide it forward. Really I
             * don't think this should happen :-0
             */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
                sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
                    (uint32_t) asoc->mapping_array_size,
                    SCTP_MAP_SLIDE_NONE);
            }
        } else {
            int ii;

            for (ii = 0; ii < distance; ii++) {
                asoc->mapping_array[ii] =
                    asoc->mapping_array[slide_from + ii];
            }
            for (ii = distance; ii <= slide_end; ii++) {
                asoc->mapping_array[ii] = 0;
            }
            asoc->mapping_array_base_tsn += (slide_from << 3);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
                sctp_log_map(asoc->mapping_array_base_tsn,
                    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
                    SCTP_MAP_SLIDE_RESULT);
            }
        }
    }
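    /*
     * Editor's worked example (not in the original): suppose
     * mapping_array_base_tsn = 1000, bytes 0-2 are 0xff and byte 3 is
     * 0x07.  The scan stops at slide_from = 3 with at = 24 +
     * sctp_map_lookup_tab[0x07] = 24 + 2 = 26, so cumulative_tsn = 1026
     * (TSNs 1000..1026 are all present).  If highest_tsn_inside_map =
     * 1040 then lgap = 40, slide_end = 5, distance = 3; bytes 3..5 are
     * copied down to 0..2 and the base advances by slide_from * 8 = 24
     * to 1024.
     */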
    /*
     * Now we need to see if we need to queue a sack or just start the
     * timer (if allowed).
     */
    if (ok_to_sack) {
        if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
            /*
             * Ok special case, in SHUTDOWN-SENT case. Here we
             * make sure the SACK timer is off and instead send a
             * SHUTDOWN and a SACK
             */
            if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
                sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
                    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
            }
            sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
            sctp_send_sack(stcb);
        } else {
            int is_a_gap;

            /* is there a gap now ? */
            is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
                stcb->asoc.cumulative_tsn, MAX_TSN);

            /*
             * CMT DAC algorithm: increase number of packets
             * received since last ack
             */
            stcb->asoc.cmt_dac_pkts_rcvd++;

            if ((stcb->asoc.send_sack == 1) ||  /* We need to send a SACK */
                ((was_a_gap) && (is_a_gap == 0)) ||  /* was a gap, but no longer is one */
                (stcb->asoc.numduptsns) ||  /* we have dup's */
                (is_a_gap) ||  /* is still a gap */
                (stcb->asoc.delayed_ack == 0) ||  /* Delayed sack disabled */
                (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)  /* hit limit of pkts */
                ) {

                if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
                    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
                    (stcb->asoc.send_sack == 0) &&
                    (stcb->asoc.numduptsns == 0) &&
                    (stcb->asoc.delayed_ack) &&
                    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
                    /*
                     * CMT DAC algorithm: With CMT,
                     * delay acks even in the face of
                     * reordering. Therefore, acks that
                     * do not have to be sent because of
                     * the above reasons will be
                     * delayed. That is, acks that would
                     * have been sent due to gap reports
                     * will be delayed with DAC. Start
                     * the delayed ack timer.
                     */
                    sctp_timer_start(SCTP_TIMER_TYPE_RECV,
                        stcb->sctp_ep, stcb, NULL);
                } else {
                    /*
                     * Ok we must build a SACK since the
                     * timer is pending, we got our
                     * first packet OR there are gaps or
                     * duplicates.
                     */
                    (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
                    sctp_send_sack(stcb);
                }
            } else {
                if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
                    sctp_timer_start(SCTP_TIMER_TYPE_RECV,
                        stcb->sctp_ep, stcb, NULL);
                }
            }
        }
    }
}
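/*
 * Editor's sketch (not part of the original file): compare_with_wrap(a, b,
 * MAX_TSN), used throughout this file, is RFC 1982 style serial-number
 * comparison: "is a strictly newer than b?" under 32-bit wrap-around.  A
 * minimal equivalent, assuming only that TSNs wrap modulo 2^32 (the
 * in-tree helper additionally takes the wrap point as an argument):
 */
static __inline int
sctp_tsn_gt_sketch(uint32_t a, uint32_t b)
{
    /* e.g. sctp_tsn_gt_sketch(5, 0xfffffffe) is 1: 5 is 7 steps newer */
    return ((a != b) && ((uint32_t) (a - b) < (1U << 31)));
}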
void
sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;
    uint32_t tsize;
    uint16_t nxt_todel;

    if (asoc->fragmented_delivery_inprogress) {
        sctp_service_reassembly(stcb, asoc);
    }
    /* Can we proceed further, i.e. the PD-API is complete */
    if (asoc->fragmented_delivery_inprogress) {
        /* no */
        return;
    }
    /*
     * Now is there some other chunk I can deliver from the reassembly
     * queue.
     */
doit_again:
    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk == NULL) {
        asoc->size_on_reasm_queue = 0;
        asoc->cnt_on_reasm_queue = 0;
        return;
    }
    nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
    if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
        ((nxt_todel == chk->rec.data.stream_seq) ||
        (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
        /*
         * Yep the first one is here. We setup to start reception,
         * by backing down the TSN just in case we can't deliver.
         */

        /*
         * Before we start though either all of the message should
         * be here or 1/4 the socket buffer max or nothing on the
         * delivery queue and something can be delivered.
         */
        if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
            (tsize >= stcb->sctp_ep->partial_delivery_point))) {
            asoc->fragmented_delivery_inprogress = 1;
            asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
            asoc->str_of_pdapi = chk->rec.data.stream_number;
            asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
            asoc->pdapi_ppid = chk->rec.data.payloadtype;
            asoc->fragment_flags = chk->rec.data.rcv_flags;
            sctp_service_reassembly(stcb, asoc);
            if (asoc->fragmented_delivery_inprogress == 0) {
                goto doit_again;
            }
        }
    }
}
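/*
 * Editor's example (not in the original): the partial-delivery gate above
 * means that, with a partial_delivery_point of say 4096 bytes, a large
 * message arriving as 1200-byte fragments starts being handed to the
 * socket (PD-API) once roughly four fragments are queued, instead of
 * waiting for the whole message.  The 4096 figure is purely illustrative;
 * partial_delivery_point is configured on the endpoint elsewhere.
 */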
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
    struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t * high_tsn)
{
    struct sctp_data_chunk *ch, chunk_buf;
    struct sctp_association *asoc;
    int num_chunks = 0;    /* number of control chunks processed */
    int stop_proc = 0;
    int chk_length, break_flag, last_chunk;
    int abort_flag = 0, was_a_gap = 0;
    struct mbuf *m;

    /* set the rwnd */
    sctp_set_rwnd(stcb, &stcb->asoc);

    m = *mm;
    SCTP_TCB_LOCK_ASSERT(stcb);
    asoc = &stcb->asoc;
    if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
        stcb->asoc.cumulative_tsn, MAX_TSN)) {
        /* there was a gap before this data was processed */
        was_a_gap = 1;
    }
    /*
     * setup where we got the last DATA packet from for any SACK that
     * may need to go out. Don't bump the net. This is done ONLY when a
     * chunk is assigned.
     */
    asoc->last_data_chunk_from = net;

    /*
     * Now before we proceed we must figure out if this is a wasted
     * cluster... i.e. it is a small packet sent in and yet the driver
     * underneath allocated a full cluster for it. If so we must copy it
     * to a smaller mbuf and free up the cluster mbuf. This will help
     * with cluster starvation. Note for __Panda__ we don't do this
     * since it has clusters all the way down to 64 bytes.
     */
    if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
        /* we only handle mbufs that are singletons.. not chains */
        m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
        if (m) {
            /* ok lets see if we can copy the data up */
            caddr_t *from, *to;

            /* get the pointers and copy */
            to = mtod(m, caddr_t *);
            from = mtod((*mm), caddr_t *);
            memcpy(to, from, SCTP_BUF_LEN((*mm)));
            /* copy the length and free up the old */
            SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
            sctp_m_freem(*mm);
            /* success, back copy */
            *mm = m;
        } else {
            /* We are in trouble in the mbuf world .. yikes */
            m = *mm;
        }
    }
    /* get pointer to the first chunk header */
    ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
        sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
    if (ch == NULL) {
        return (1);
    }
    /*
     * process all DATA chunks...
     */
    *high_tsn = asoc->cumulative_tsn;
    break_flag = 0;
    asoc->data_pkts_seen++;
    while (stop_proc == 0) {
        /* validate chunk length */
        chk_length = ntohs(ch->ch.chunk_length);
        if (length - *offset < chk_length) {
            /* all done, mutilated chunk */
            stop_proc = 1;
            break;
        }
        if (ch->ch.chunk_type == SCTP_DATA) {
            if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
                /*
                 * Need to send an abort since we had an
                 * invalid data chunk.
                 */
                struct mbuf *op_err;

                op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);

                if (op_err) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
                        (2 * sizeof(uint32_t));
                    ph = mtod(op_err, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(op_err));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
                    ippp++;
                    *ippp = asoc->cumulative_tsn;
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
                sctp_abort_association(inp, stcb, m, iphlen, sh,
                    op_err, 0, net->port);
                return (2);
            }
#ifdef SCTP_AUDITING_ENABLED
            sctp_audit_log(0xB1, 0);
#endif
            if (SCTP_SIZE32(chk_length) == (length - *offset)) {
                last_chunk = 1;
            } else {
                last_chunk = 0;
            }
            if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
                chk_length, net, high_tsn, &abort_flag, &break_flag,
                last_chunk)) {
                num_chunks++;
            }
            if (abort_flag)
                return (2);

            if (break_flag) {
                /*
                 * Set because of out of rwnd space and no
                 * drop rep space left.
                 */
                stop_proc = 1;
                break;
            }
        } else {
            /* not a data chunk in the data region */
            switch (ch->ch.chunk_type) {
            case SCTP_INITIATION:
            case SCTP_INITIATION_ACK:
            case SCTP_SELECTIVE_ACK:
            case SCTP_HEARTBEAT_REQUEST:
            case SCTP_HEARTBEAT_ACK:
            case SCTP_ABORT_ASSOCIATION:
            case SCTP_SHUTDOWN:
            case SCTP_SHUTDOWN_ACK:
            case SCTP_OPERATION_ERROR:
            case SCTP_COOKIE_ECHO:
            case SCTP_COOKIE_ACK:
            case SCTP_ECN_ECHO:
            case SCTP_ECN_CWR:
            case SCTP_SHUTDOWN_COMPLETE:
            case SCTP_AUTHENTICATION:
            case SCTP_ASCONF_ACK:
            case SCTP_PACKET_DROPPED:
            case SCTP_STREAM_RESET:
            case SCTP_FORWARD_CUM_TSN:
            case SCTP_ASCONF:
                /*
                 * Now, what do we do with KNOWN chunks that
                 * are NOT in the right place?
                 *
                 * For now, I do nothing but ignore them. We
                 * may later want to add sysctl stuff to
                 * switch out and do either an ABORT() or
                 * possibly process them.
                 */
                if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
                    struct mbuf *op_err;

                    op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
                    return (2);
                }
                break;
            default:
                /* unknown chunk type, use bit rules */
                if (ch->ch.chunk_type & 0x40) {
                    /* Add a error report to the queue */
                    struct mbuf *merr;
                    struct sctp_paramhdr *phd;

                    merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
                    if (merr) {
                        phd = mtod(merr, struct sctp_paramhdr *);
                        /*
                         * We cheat and use param
                         * type since we did not
                         * bother to define a error
                         * cause struct. They are
                         * the same basic format
                         * with different names.
                         */
                        phd->param_type =
                            htons(SCTP_CAUSE_UNRECOG_CHUNK);
                        phd->param_length =
                            htons(chk_length + sizeof(*phd));
                        SCTP_BUF_LEN(merr) = sizeof(*phd);
                        SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
                            SCTP_SIZE32(chk_length),
                            M_DONTWAIT);
                        if (SCTP_BUF_NEXT(merr)) {
                            sctp_queue_op_err(stcb, merr);
                        } else {
                            sctp_m_freem(merr);
                        }
                    }
                }
                if ((ch->ch.chunk_type & 0x80) == 0) {
                    /* discard the rest of this packet */
                    stop_proc = 1;
                }    /* else skip this bad chunk and
                     * continue... */
                break;
            };    /* switch of chunk type */
        }
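        /*
         * Editor's note (not in the original): the "bit rules" above are
         * the two high-order bits of the chunk type, per RFC 4960,
         * section 3.2:
         *
         *    00 - stop processing this packet, discard the chunk
         *    01 - stop, discard, and report in an ERROR chunk (0x40)
         *    10 - skip this chunk and continue processing (0x80)
         *    11 - skip, continue, and report in an ERROR chunk (0xc0)
         */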
        *offset += SCTP_SIZE32(chk_length);
        if ((*offset >= length) || stop_proc) {
            /* no more data left in the mbuf chain */
            stop_proc = 1;
            continue;
        }
        ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
            sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
        if (ch == NULL) {
            *offset = length;
            stop_proc = 1;
            break;
        }
    }    /* while */
    if (break_flag) {
        /*
         * we need to report rwnd overrun drops.
         */
        sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
    }
    if (num_chunks) {
        /*
         * Did we get data, if so update the time for auto-close and
         * give peer credit for being alive.
         */
        SCTP_STAT_INCR(sctps_recvpktwithdata);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
            sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                stcb->asoc.overall_error_count,
                0,
                SCTP_FROM_SCTP_INDATA,
                __LINE__);
        }
        stcb->asoc.overall_error_count = 0;
        (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
    }
    /* now service all of the reassm queue if needed */
    if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
        sctp_service_queues(stcb, asoc);

    if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
        /* Assure that we ack right away */
        stcb->asoc.send_sack = 1;
    }
    /* Start a sack timer or QUEUE a SACK for sending */
    if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
        (stcb->asoc.mapping_array[0] != 0xff)) {
        if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
            (stcb->asoc.delayed_ack == 0) ||
            (stcb->asoc.numduptsns) ||
            (stcb->asoc.send_sack == 1)) {
            if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
                (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
            }
            sctp_send_sack(stcb);
        } else {
            if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
                sctp_timer_start(SCTP_TIMER_TYPE_RECV,
                    stcb->sctp_ep, stcb, NULL);
            }
        }
    } else {
        sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
    }
    if (abort_flag)
        return (2);

    return (0);
}
static void
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
    int num_seg, int *ecn_seg_sums)
{
    /************************************************/
    /* process fragments and update sendqueue        */
    /************************************************/
    struct sctp_sack *sack;
    struct sctp_gap_ack_block *frag, block;
    struct sctp_tmit_chunk *tp1;
    int i;
    unsigned int j;
    int num_frs = 0;
    uint16_t frag_strt, frag_end, primary_flag_set;
    u_long last_frag_high;

    /*
     * @@@ JRI : TODO: This flag is not used anywhere .. remove?
     */
    if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
        primary_flag_set = 1;
    } else {
        primary_flag_set = 0;
    }
    sack = &ch->sack;

    frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
        sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
    *offset += sizeof(block);
    if (frag == NULL) {
        return;
    }
    tp1 = NULL;
    last_frag_high = 0;
    for (i = 0; i < num_seg; i++) {
        frag_strt = ntohs(frag->start);
        frag_end = ntohs(frag->end);
        /* some sanity checks on the fragment offsets */
        if (frag_strt > frag_end) {
            /* this one is malformed, skip */
            frag++;
            continue;
        }
        if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
            MAX_TSN))
            *biggest_tsn_acked = frag_end + last_tsn;
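        /*
         * Editor's worked example (not in the original): per RFC 4960 a
         * gap ack block's start/end are offsets relative to the SACK's
         * cumulative TSN ack.  With last_tsn = 1000 and a block of
         * start = 2, end = 4, the peer is reporting TSNs 1002..1004 as
         * received; the loops below walk the sent queue over that range.
         */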
        /* mark acked dgs and find out the highestTSN being acked */
        if (tp1 == NULL) {
            tp1 = TAILQ_FIRST(&asoc->sent_queue);

            /* save the locations of the last frags */
            last_frag_high = frag_end + last_tsn;
        } else {
            /*
             * now lets see if we need to reset the queue due to
             * a out-of-order SACK fragment
             */
            if (compare_with_wrap(frag_strt + last_tsn,
                last_frag_high, MAX_TSN)) {
                /*
                 * if the new frag starts after the last TSN
                 * frag covered, we are ok and this one is
                 * beyond the last one
                 */
                ;
            } else {
                /*
                 * ok, they have reset us, so we need to
                 * reset the queue this will cause extra
                 * hunting but hey, they chose the
                 * performance hit when they failed to order
                 * their gaps
                 */
                tp1 = TAILQ_FIRST(&asoc->sent_queue);
            }
            last_frag_high = frag_end + last_tsn;
        }
        for (j = frag_strt + last_tsn; (compare_with_wrap((frag_end + last_tsn), j, MAX_TSN)); j++) {
            while (tp1) {
                if (tp1->rec.data.doing_fast_retransmit)
                    num_frs++;

                /*
                 * CMT: CUCv2 algorithm. For each TSN being
                 * processed from the sent queue, track the
                 * next expected pseudo-cumack, or
                 * rtx_pseudo_cumack, if required. Separate
                 * cumack trackers for first transmissions,
                 * and retransmissions.
                 */
                if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
                    (tp1->snd_count == 1)) {
                    tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
                    tp1->whoTo->find_pseudo_cumack = 0;
                }
                if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
                    (tp1->snd_count > 1)) {
                    tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
                    tp1->whoTo->find_rtx_pseudo_cumack = 0;
                }
                if (tp1->rec.data.TSN_seq == j) {
                    if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
                        /*
                         * must be held until
                         * cum-ack passes
                         */
                        /*
                         * ECN Nonce: Add the nonce
                         * value to the sender's
                         * nonce sum
                         */
                        if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                            /*
                             * If it is less than RESEND, it is
                             * now no-longer in flight.
                             * Higher values may already be set
                             * via previous Gap Ack Blocks...
                             * i.e. ACKED or RESEND.
                             */
                            if (compare_with_wrap(tp1->rec.data.TSN_seq,
                                *biggest_newly_acked_tsn, MAX_TSN)) {
                                *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
                            }
                            /*
                             * CMT: SFR algo (and HTNA) - set
                             * saw_newack to 1 for dest being
                             * newly acked; update
                             * this_sack_highest_newack if
                             * appropriate.
                             */
                            if (tp1->rec.data.chunk_was_revoked == 0)
                                tp1->whoTo->saw_newack = 1;

                            if (compare_with_wrap(tp1->rec.data.TSN_seq,
                                tp1->whoTo->this_sack_highest_newack,
                                MAX_TSN)) {
                                tp1->whoTo->this_sack_highest_newack =
                                    tp1->rec.data.TSN_seq;
                            }
                            /*
                             * CMT DAC algo: also update
                             * this_sack_lowest_newack
                             */
                            if (*this_sack_lowest_newack == 0) {
                                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
                                    sctp_log_sack(*this_sack_lowest_newack,
                                        last_tsn,
                                        tp1->rec.data.TSN_seq,
                                        0,
                                        0,
                                        SCTP_LOG_TSN_ACKED);
                                }
                                *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
                            }
                            /*
                             * CMT: CUCv2 algorithm. If the
                             * (rtx-)pseudo-cumack for the corresp
                             * dest is being acked, then we have a
                             * new (rtx-)pseudo-cumack. Set
                             * new_(rtx_)pseudo_cumack to TRUE so
                             * that the cwnd for this dest can be
                             * updated, and trigger search for the
                             * next expected (rtx-)pseudo-cumack.
                             * Separate pseudo_cumack trackers for
                             * first transmissions and
                             * retransmissions.
                             */
                            if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
                                if (tp1->rec.data.chunk_was_revoked == 0) {
                                    tp1->whoTo->new_pseudo_cumack = 1;
                                }
                                tp1->whoTo->find_pseudo_cumack = 1;
                            }
                            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                                sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
                            }
                            if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
                                if (tp1->rec.data.chunk_was_revoked == 0) {
                                    tp1->whoTo->new_pseudo_cumack = 1;
                                }
                                tp1->whoTo->find_rtx_pseudo_cumack = 1;
                            }
                            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
                                sctp_log_sack(*biggest_newly_acked_tsn,
                                    last_tsn,
                                    tp1->rec.data.TSN_seq,
                                    frag_strt,
                                    frag_end,
                                    SCTP_LOG_TSN_ACKED);
                            }
                            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                                sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
                                    tp1->whoTo->flight_size,
                                    tp1->book_size,
                                    (uintptr_t) tp1->whoTo,
                                    tp1->rec.data.TSN_seq);
                            }
                            sctp_flight_size_decrease(tp1);
                            sctp_total_flight_decrease(stcb, tp1);

                            tp1->whoTo->net_ack += tp1->send_size;
                            if (tp1->snd_count < 2) {
                                /*
                                 * True non-retransmitted
                                 * chunk
                                 */
                                tp1->whoTo->net_ack2 += tp1->send_size;

                                /* update RTO too? */
                                if (tp1->do_rtt) {
                                    tp1->whoTo->RTO =
                                        sctp_calculate_rto(stcb,
                                        asoc, tp1->whoTo,
                                        &tp1->sent_rcv_time,
                                        sctp_align_safe_nocopy);
                                    tp1->do_rtt = 0;
                                }
                            }
                        }
                        if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
                            (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
                            (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
                            if (compare_with_wrap(tp1->rec.data.TSN_seq,
                                asoc->this_sack_highest_gap,
                                MAX_TSN)) {
                                asoc->this_sack_highest_gap =
                                    tp1->rec.data.TSN_seq;
                            }
                            if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                                sctp_ucount_decr(asoc->sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
                                sctp_audit_log(0xB2,
                                    (asoc->sent_queue_retran_cnt & 0x000000ff));
#endif
                            }
                        }
                        /*
                         * All chunks NOT UNSENT
                         * fall through here and are
                         * marked
                         */
                        tp1->sent = SCTP_DATAGRAM_MARKED;
                        if (tp1->rec.data.chunk_was_revoked) {
                            /* deflate the cwnd */
                            tp1->whoTo->cwnd -= tp1->book_size;
                            tp1->rec.data.chunk_was_revoked = 0;
                        }
                    }
                    break;
                }    /* if (tp1->TSN_seq == j) */
                if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
                    MAX_TSN))
                    break;
                tp1 = TAILQ_NEXT(tp1, sctp_next);
            }    /* end while (tp1) */
        }        /* end for (j = fragStart */
        frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
            sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
        *offset += sizeof(block);
        if (frag == NULL) {
            break;
        }
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
        if (num_frs)
            sctp_log_fr(*biggest_tsn_acked,
                *biggest_newly_acked_tsn,
                last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
    }
}
static void
sctp_check_for_revoked(struct sctp_tcb *stcb,
    struct sctp_association *asoc, uint32_t cumack,
    u_long biggest_tsn_acked)
{
    struct sctp_tmit_chunk *tp1;
    int tot_revoked = 0;

    tp1 = TAILQ_FIRST(&asoc->sent_queue);
    while (tp1) {
        if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
            MAX_TSN)) {
            /*
             * ok this guy is either ACK or MARKED. If it is
             * ACKED it has been previously acked but not this
             * time i.e. revoked. If it is MARKED it was ACK'ed
             * again.
             */
            if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
                MAX_TSN)) {
                break;
            }
            if (tp1->sent == SCTP_DATAGRAM_ACKED) {
                /* it has been revoked */
                tp1->sent = SCTP_DATAGRAM_SENT;
                tp1->rec.data.chunk_was_revoked = 1;
                /*
                 * We must add this stuff back in to assure
                 * timers and such get started.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                    sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
                        tp1->whoTo->flight_size,
                        tp1->book_size,
                        (uintptr_t) tp1->whoTo,
                        tp1->rec.data.TSN_seq);
                }
                sctp_flight_size_increase(tp1);
                sctp_total_flight_increase(stcb, tp1);
                /*
                 * We inflate the cwnd to compensate for our
                 * artificial inflation of the flight_size.
                 */
                tp1->whoTo->cwnd += tp1->book_size;
                tot_revoked++;
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
                    sctp_log_sack(asoc->last_acked_seq,
                        cumack,
                        tp1->rec.data.TSN_seq,
                        0,
                        0,
                        SCTP_LOG_TSN_REVOKED);
                }
            } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
                /* it has been re-acked in this SACK */
                tp1->sent = SCTP_DATAGRAM_ACKED;
            }
        }
        if (tp1->sent == SCTP_DATAGRAM_UNSENT)
            break;
        tp1 = TAILQ_NEXT(tp1, sctp_next);
    }
    if (tot_revoked > 0) {
        /*
         * Setup the ecn nonce re-sync point. We do this since once
         * data is revoked we begin to retransmit things, which do
         * NOT have the ECN bits set. This means we are now out of
         * sync and must wait until we get back in sync with the
         * peer to check ECN bits.
         */
        tp1 = TAILQ_FIRST(&asoc->send_queue);
        if (tp1 == NULL) {
            asoc->nonce_resync_tsn = asoc->sending_seq;
        } else {
            asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
        }
        asoc->nonce_wait_for_ecne = 0;
        asoc->nonce_sum_check = 0;
    }
}
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
    u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
{
    struct sctp_tmit_chunk *tp1;
    int strike_flag = 0;
    struct timeval now;
    int tot_retrans = 0;
    uint32_t sending_seq;
    struct sctp_nets *net;
    int num_dests_sacked = 0;

    /*
     * select the sending_seq, this is either the next thing ready to be
     * sent but not transmitted, OR, the next seq we assign.
     */
    tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
    if (tp1 == NULL) {
        sending_seq = asoc->sending_seq;
    } else {
        sending_seq = tp1->rec.data.TSN_seq;
    }

    /* CMT DAC algo: finding out if SACK is a mixed SACK */
    if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            if (net->saw_newack)
                num_dests_sacked++;
        }
    }
    if (stcb->asoc.peer_supports_prsctp) {
        (void)SCTP_GETTIME_TIMEVAL(&now);
    }
    tp1 = TAILQ_FIRST(&asoc->sent_queue);
    while (tp1) {
        strike_flag = 0;
        if (tp1->no_fr_allowed) {
            /* this one had a timeout or something */
            tp1 = TAILQ_NEXT(tp1, sctp_next);
            continue;
        }
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
            if (tp1->sent < SCTP_DATAGRAM_RESEND)
                sctp_log_fr(biggest_tsn_newly_acked,
                    tp1->rec.data.TSN_seq,
                    tp1->sent,
                    SCTP_FR_LOG_CHECK_STRIKE);
        }
        if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
            MAX_TSN) ||
            tp1->sent == SCTP_DATAGRAM_UNSENT) {
            /* done */
            break;
        }
        if (stcb->asoc.peer_supports_prsctp) {
            if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
                /* Is it expired? */
                /*
                 * TODO sctp_constants.h needs alternative
                 * time macros when _KERNEL is undefined.
                 */
                if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
                    /* Yes so drop it */
                    if (tp1->data != NULL) {
                        (void)sctp_release_pr_sctp_chunk(stcb, tp1,
                            (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
                            &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
                    }
                    tp1 = TAILQ_NEXT(tp1, sctp_next);
                    continue;
                }
            }
            if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
                /* Has it been retransmitted tv_sec times? */
                if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
                    /* Yes, so drop it */
                    if (tp1->data != NULL) {
                        (void)sctp_release_pr_sctp_chunk(stcb, tp1,
                            (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
                            &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
                    }
                    tp1 = TAILQ_NEXT(tp1, sctp_next);
                    continue;
                }
            }
        }
        if (compare_with_wrap(tp1->rec.data.TSN_seq,
            asoc->this_sack_highest_gap, MAX_TSN)) {
            /* we are beyond the tsn in the sack */
            break;
        }
        if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
            /* either a RESEND, ACKED, or MARKED */
            /* skip */
            tp1 = TAILQ_NEXT(tp1, sctp_next);
            continue;
        }
        /*
         * CMT : SFR algo (covers part of DAC and HTNA as well)
         */
        if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
            /*
             * No new acks were received for data sent to this
             * dest. Therefore, according to the SFR algo for
             * CMT, no data sent to this dest can be marked for
             * FR using this SACK.
             */
            tp1 = TAILQ_NEXT(tp1, sctp_next);
            continue;
        } else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
            tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
            /*
             * CMT: New acks were received for data sent to
             * this dest. But no new acks were seen for data
             * sent after tp1. Therefore, according to the SFR
             * algo for CMT, tp1 cannot be marked for FR using
             * this SACK. This step covers part of the DAC algo
             * and the HTNA algo as well.
             */
            tp1 = TAILQ_NEXT(tp1, sctp_next);
            continue;
        }
        /*
         * Here we check to see if we have already done a FR
         * and if so we see if the biggest TSN we saw in the sack is
         * smaller than the recovery point. If so we don't strike
         * the tsn... otherwise we CAN strike the TSN.
         */
        /*
         * @@@ JRI: Check for CMT if (accum_moved &&
         * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
         * 0)) {
         */
        if (accum_moved && asoc->fast_retran_loss_recovery) {
            /*
             * Strike the TSN if in fast-recovery and cum-ack
             * moved.
             */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                sctp_log_fr(biggest_tsn_newly_acked,
                    tp1->rec.data.TSN_seq,
                    tp1->sent,
                    SCTP_FR_LOG_STRIKE_CHUNK);
            }
            if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                tp1->sent++;
            }
            if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
                /*
                 * CMT DAC algorithm: If SACK flag is set to
                 * 0, then lowest_newack test will not pass
                 * because it would have been set to the
                 * cumack earlier. If not already to be
                 * rtx'd, If not a mixed sack and if tp1 is
                 * not between two sacked TSNs, then mark by
                 * one more. NOTE that we are marking by one
                 * additional time since the SACK DAC flag
                 * indicates that two packets have been
                 * received after this missing TSN.
                 */
                if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
                    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                        sctp_log_fr(16 + num_dests_sacked,
                            tp1->rec.data.TSN_seq,
                            tp1->sent,
                            SCTP_FR_LOG_STRIKE_CHUNK);
                    }
                    tp1->sent++;
                }
            }
        } else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
            /*
             * For those that have done a FR we must take
             * special consideration if we strike. I.e the
             * biggest_newly_acked must be higher than the
             * sending_seq at the time we did the FR.
             */
            if (
#ifdef SCTP_FR_TO_ALTERNATE
            /*
             * If FR's go to new networks, then we must only do
             * this for singly homed asoc's. However if the FR's
             * go to the same network (Armando's work) then its
             * ok to FR multiple times.
             */
                (asoc->numnets < 2)
#else
                (1)
#endif
                ) {

                if ((compare_with_wrap(biggest_tsn_newly_acked,
                    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
                    (biggest_tsn_newly_acked ==
                    tp1->rec.data.fast_retran_tsn)) {
                    /*
                     * Strike the TSN, since this ack is
                     * beyond where things were when we
                     * did a FR.
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                        sctp_log_fr(biggest_tsn_newly_acked,
                            tp1->rec.data.TSN_seq,
                            tp1->sent,
                            SCTP_FR_LOG_STRIKE_CHUNK);
                    }
                    if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                        tp1->sent++;
                    }
                    strike_flag = 1;
                    if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
                        /*
                         * CMT DAC algorithm: If
                         * SACK flag is set to 0,
                         * then lowest_newack test
                         * will not pass because it
                         * would have been set to
                         * the cumack earlier. If
                         * not already to be rtx'd,
                         * If not a mixed sack and
                         * if tp1 is not between two
                         * sacked TSNs, then mark by
                         * one more. NOTE that we
                         * are marking by one
                         * additional time since the
                         * SACK DAC flag indicates
                         * that two packets have
                         * been received after this
                         * missing TSN.
                         */
                        if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
                            (num_dests_sacked == 1) &&
                            compare_with_wrap(this_sack_lowest_newack,
                            tp1->rec.data.TSN_seq, MAX_TSN)) {
                            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                                sctp_log_fr(32 + num_dests_sacked,
                                    tp1->rec.data.TSN_seq,
                                    tp1->sent,
                                    SCTP_FR_LOG_STRIKE_CHUNK);
                            }
                            if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                                tp1->sent++;
                            }
                        }
                    }
                }
            }
            /*
             * JRI: TODO: remove code for HTNA algo. CMT's SFR
             * algo covers HTNA.
             */
        } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
            biggest_tsn_newly_acked, MAX_TSN)) {
            /*
             * We don't strike these: This is the HTNA
             * algorithm i.e. we don't strike If our TSN is
             * larger than the Highest TSN Newly Acked.
             */
            ;
        } else {
            /* Strike the TSN */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                sctp_log_fr(biggest_tsn_newly_acked,
                    tp1->rec.data.TSN_seq,
                    tp1->sent,
                    SCTP_FR_LOG_STRIKE_CHUNK);
            }
            if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                tp1->sent++;
            }
            if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
                /*
                 * CMT DAC algorithm: If SACK flag is set to
                 * 0, then lowest_newack test will not pass
                 * because it would have been set to the
                 * cumack earlier. If not already to be
                 * rtx'd, If not a mixed sack and if tp1 is
                 * not between two sacked TSNs, then mark by
                 * one more. NOTE that we are marking by one
                 * additional time since the SACK DAC flag
                 * indicates that two packets have been
                 * received after this missing TSN.
                 */
                if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
                    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                        sctp_log_fr(48 + num_dests_sacked,
                            tp1->rec.data.TSN_seq,
                            tp1->sent,
                            SCTP_FR_LOG_STRIKE_CHUNK);
                    }
                    tp1->sent++;
                }
            }
        }
        if (tp1->sent == SCTP_DATAGRAM_RESEND) {
            /* Increment the count to resend */
            struct sctp_nets *alt;

            /* printf("OK, we are now ready to FR this guy\n"); */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
                sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
                    0, SCTP_FR_MARKED);
            }
            if (strike_flag) {
                /* This is a subsequent FR */
                SCTP_STAT_INCR(sctps_sendmultfastretrans);
            }
            sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
            if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
                /*
                 * CMT: Using RTX_SSTHRESH policy for CMT.
                 * If CMT is being used, then pick dest with
                 * largest ssthresh for any retransmission.
                 */
                tp1->no_fr_allowed = 1;
                alt = tp1->whoTo;
                /* sa_ignore NO_NULL_CHK */
                if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
                    /*
                     * JRS 5/18/07 - If CMT PF is on,
                     * use the PF version of
                     * find_alt_net()
                     */
                    alt = sctp_find_alternate_net(stcb, alt, 2);
                } else {
                    /*
                     * JRS 5/18/07 - If only CMT is on,
                     * use the CMT version of
                     * find_alt_net()
                     */
                    /* sa_ignore NO_NULL_CHK */
                    alt = sctp_find_alternate_net(stcb, alt, 1);
                }
                if (alt == NULL) {
                    alt = tp1->whoTo;
                }
                /*
                 * CUCv2: If a different dest is picked for
                 * the retransmission, then new
                 * (rtx-)pseudo_cumack needs to be tracked
                 * for orig dest. Let CUCv2 track new (rtx-)
                 * pseudo-cumack always.
                 */
                if (tp1->whoTo) {
                    tp1->whoTo->find_pseudo_cumack = 1;
                    tp1->whoTo->find_rtx_pseudo_cumack = 1;
                }
            } else {/* CMT is OFF */

#ifdef SCTP_FR_TO_ALTERNATE
                /* Can we find an alternate? */
                alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
                /*
                 * default behavior is to NOT retransmit
                 * FR's to an alternate. Armando Caro's
                 * paper details why.
                 */
                alt = tp1->whoTo;
#endif
            }

            tp1->rec.data.doing_fast_retransmit = 1;
            tot_retrans++;
            /* mark the sending seq for possible subsequent FR's */
            /*
             * printf("Marking TSN for FR new value %x\n",
             * (uint32_t)tpi->rec.data.TSN_seq);
             */
            if (TAILQ_EMPTY(&asoc->send_queue)) {
                /*
                 * If the queue of send is empty then its
                 * the next sequence number that will be
                 * assigned so we subtract one from this to
                 * get the one we last sent.
                 */
                tp1->rec.data.fast_retran_tsn = sending_seq;
            } else {
                /*
                 * If there are chunks on the send queue
                 * (unsent data that has made it from the
                 * stream queues but not out the door), we
                 * take the first one (which will have the
                 * lowest TSN) and subtract one to get the
                 * one we last sent.
                 */
                struct sctp_tmit_chunk *ttt;

                ttt = TAILQ_FIRST(&asoc->send_queue);
                tp1->rec.data.fast_retran_tsn =
                    ttt->rec.data.TSN_seq;
            }

            if (tp1->do_rtt) {
                /*
                 * this guy had a RTO calculation pending on
                 * it, cancel it
                 */
                tp1->do_rtt = 0;
            }
            /* fix counts and things */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
                    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
                    tp1->book_size,
                    (uintptr_t) tp1->whoTo,
                    tp1->rec.data.TSN_seq);
            }
            if (tp1->whoTo) {
                tp1->whoTo->net_ack++;
                sctp_flight_size_decrease(tp1);
            }
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
                sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
                    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
            }
            /* add back to the rwnd */
            asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));

            /* remove from the total flight */
            sctp_total_flight_decrease(stcb, tp1);
            if (alt != tp1->whoTo) {
                /* yes, there is an alternate. */
                sctp_free_remote_addr(tp1->whoTo);
                /* sa_ignore FREED_MEMORY */
                tp1->whoTo = alt;
                atomic_add_int(&alt->ref_count, 1);
            }
        }
        tp1 = TAILQ_NEXT(tp1, sctp_next);
    }            /* while (tp1) */

    if (tot_retrans > 0) {
        /*
         * Setup the ecn nonce re-sync point. We do this since once
         * we go to FR something we introduce a Karn's rule scenario
         * and won't know the totals for the ECN bits.
         */
        asoc->nonce_resync_tsn = sending_seq;
        asoc->nonce_wait_for_ecne = 0;
        asoc->nonce_sum_check = 0;
    }
}
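/*
 * Editor's note (not part of the original file): each "strike" above is a
 * tp1->sent++ taken only while tp1->sent is still below
 * SCTP_DATAGRAM_RESEND, so a chunk is promoted to the RESEND state after a
 * small fixed number of missing reports (three with the stock
 * sctp_constants.h values, plus the extra DAC mark when it applies).  The
 * block guarded by "tp1->sent == SCTP_DATAGRAM_RESEND" then performs the
 * actual fast-retransmit bookkeeping and destination selection.
 */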
struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
    struct timeval now;
    int now_filled = 0;

    if (asoc->peer_supports_prsctp == 0) {
        return (NULL);
    }
    tp1 = TAILQ_FIRST(&asoc->sent_queue);
    while (tp1) {
        if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
            tp1->sent != SCTP_DATAGRAM_RESEND) {
            /* no chance to advance, out of here */
            break;
        }
        if (!PR_SCTP_ENABLED(tp1->flags)) {
            /*
             * We can't fwd-tsn past any that are reliable aka
             * retransmitted until the asoc fails.
             */
            break;
        }
        if (!now_filled) {
            (void)SCTP_GETTIME_TIMEVAL(&now);
            now_filled = 1;
        }
        tp2 = TAILQ_NEXT(tp1, sctp_next);
        /*
         * now we got a chunk which is marked for another
         * retransmission to a PR-stream but has run out its chances
         * already maybe OR has been marked to skip now. Can we skip
         * it if its a resend?
         */
        if (tp1->sent == SCTP_DATAGRAM_RESEND &&
            (PR_SCTP_TTL_ENABLED(tp1->flags))) {
            /*
             * Now is this one marked for resend and its time is
             * now up?
             */
            if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
                /* Yes so drop it */
                if (tp1->data) {
                    (void)sctp_release_pr_sctp_chunk(stcb, tp1,
                        (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
                        &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
                }
            } else {
                /*
                 * No, we are done when we hit one for resend
                 * whose time has not expired.
                 */
                break;
            }
        }
        /*
         * Ok now if this chunk is marked to drop it we can clean up
         * the chunk, advance our peer ack point and we can check
         * the next chunk.
         */
        if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
            /* advance PeerAckPoint goes forward */
            asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
            a_adv = tp1;
            /*
             * we don't want to de-queue it here. Just wait for
             * the next peer SACK to come with a new cumTSN and
             * then the chunk will be dropped in the normal
             * fashion.
             */
            if (tp1->data) {
                sctp_free_bufspace(stcb, asoc, tp1, 1);
                /*
                 * Maybe there should be another
                 * notification type
                 */
                sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
                    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
                    tp1, SCTP_SO_NOT_LOCKED);
                sctp_m_freem(tp1->data);
                tp1->data = NULL;
                if (stcb->sctp_socket) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                    struct socket *so;

                    so = SCTP_INP_SO(stcb->sctp_ep);
                    atomic_add_int(&stcb->asoc.refcnt, 1);
                    SCTP_TCB_UNLOCK(stcb);
                    SCTP_SOCKET_LOCK(so, 1);
                    SCTP_TCB_LOCK(stcb);
                    atomic_subtract_int(&stcb->asoc.refcnt, 1);
                    if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
                        /*
                         * assoc was freed while we
                         * were unlocked
                         */
                        SCTP_SOCKET_UNLOCK(so, 1);
                        return (NULL);
                    }
#endif
                    sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                    SCTP_SOCKET_UNLOCK(so, 1);
#endif
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
                        sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
                    }
                }
            }
        } else {
            /*
             * If it is still in RESEND we can advance no
             * further
             */
            break;
        }
        /*
         * If we hit here we just dumped tp1, move to next tsn on
         * sent queue.
         */
        tp1 = tp2;
    }
    return (a_adv);
}
static void
sctp_fs_audit(struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;
    int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;

    TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
        if (chk->sent < SCTP_DATAGRAM_RESEND) {
            inflight++;
        } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
            resend++;
        } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
            inbetween++;
        } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
            above++;
        } else {
            acked++;
        }
    }

    if ((inflight > 0) || (inbetween > 0)) {
#ifdef INVARIANTS
        panic("Flight size-express incorrect? \n");
#else
        SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n",
            inflight, inbetween);
#endif
    }
}
static void
sctp_window_probe_recovery(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct sctp_tmit_chunk *tp1)
{
    struct sctp_tmit_chunk *chk;

    /* First setup this one and get it moved back */
    tp1->sent = SCTP_DATAGRAM_UNSENT;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
        sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
            tp1->whoTo->flight_size,
            tp1->book_size,
            (uintptr_t) tp1->whoTo,
            tp1->rec.data.TSN_seq);
    }
    sctp_flight_size_decrease(tp1);
    sctp_total_flight_decrease(stcb, tp1);
    TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
    TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next);
    asoc->sent_queue_cnt--;
    asoc->send_queue_cnt++;
    /*
     * Now all guys marked for RESEND on the sent_queue must be moved
     * back too.
     */
    TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            /* Another chunk to move */
            chk->sent = SCTP_DATAGRAM_UNSENT;
            /* It should not be in flight */
            TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
            TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next);
            asoc->sent_queue_cnt--;
            asoc->send_queue_cnt++;
            sctp_ucount_decr(asoc->sent_queue_retran_cnt);
        }
    }
}
void
sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
    uint32_t rwnd, int nonce_sum_flag, int *abort_now)
{
    struct sctp_nets *net;
    struct sctp_association *asoc;
    struct sctp_tmit_chunk *tp1, *tp2;
    uint32_t old_rwnd;
    int win_probe_recovery = 0;
    int win_probe_recovered = 0;
    int j, done_once = 0;

    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
        sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
            rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
    }
    SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_ASOCLOG_OF_TSNS
    stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
    stcb->asoc.cumack_log_at++;
    if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
        stcb->asoc.cumack_log_at = 0;
    }
#endif
    asoc = &stcb->asoc;
    old_rwnd = asoc->peers_rwnd;
    if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
        /* old ack */
        return;
    } else if (asoc->last_acked_seq == cumack) {
        /* Window update sack */
        asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
            (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
        if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
            /* SWS sender side engages */
            asoc->peers_rwnd = 0;
        }
        if (asoc->peers_rwnd > old_rwnd) {
            goto again;
        }
        return;
    }
    /* First setup for CC stuff */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        net->prev_cwnd = net->cwnd;
        net->net_ack = 0;
        net->net_ack2 = 0;

        /*
         * CMT: Reset CUC and Fast recovery algo variables before
         * SACK processing
         */
        net->new_pseudo_cumack = 0;
        net->will_exit_fast_recovery = 0;
    }
    if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
        uint32_t send_s;

        if (!TAILQ_EMPTY(&asoc->sent_queue)) {
            tp1 = TAILQ_LAST(&asoc->sent_queue,
                sctpchunk_listhead);
            send_s = tp1->rec.data.TSN_seq + 1;
        } else {
            send_s = asoc->sending_seq;
        }
        if ((cumack == send_s) ||
            compare_with_wrap(cumack, send_s, MAX_TSN)) {
            /*
             * no way, we have not even sent this TSN out yet.
             * Peer is hopelessly messed up with us.
             */
            *abort_now = 1;
#ifdef INVARIANTS
            panic("Impossible sack 1");
#else
            {
                struct mbuf *oper;

                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                        sizeof(uint32_t);
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
                sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
                return;
            }
#endif
        }
    }
    asoc->this_sack_highest_gap = cumack;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
        sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
            stcb->asoc.overall_error_count,
            0,
            SCTP_FROM_SCTP_INDATA,
            __LINE__);
    }
    stcb->asoc.overall_error_count = 0;
    if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
        /* process the new consecutive TSN first */
        tp1 = TAILQ_FIRST(&asoc->sent_queue);
        while (tp1) {
            tp2 = TAILQ_NEXT(tp1, sctp_next);
            if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
                MAX_TSN) ||
                cumack == tp1->rec.data.TSN_seq) {
                if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
                    printf("Warning, an unsent is now acked?\n");
                }
                /*
                 * ECN Nonce: Add the nonce to the sender's
                 * nonce sum
                 */
                asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
                if (tp1->sent < SCTP_DATAGRAM_ACKED) {
                    /*
                     * If it is less than ACKED, it is
                     * now no-longer in flight. Higher
                     * values may occur during marking
                     */
                    if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                            sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
                                tp1->whoTo->flight_size,
                                tp1->book_size,
                                (uintptr_t) tp1->whoTo,
                                tp1->rec.data.TSN_seq);
                        }
                        sctp_flight_size_decrease(tp1);
                        /* sa_ignore NO_NULL_CHK */
                        sctp_total_flight_decrease(stcb, tp1);
                    }
                    tp1->whoTo->net_ack += tp1->send_size;
                    if (tp1->snd_count < 2) {
                        /*
                         * True non-retransmitted
                         * chunk
                         */
                        tp1->whoTo->net_ack2 +=
                            tp1->send_size;

                        /* update RTO too? */
                        if (tp1->do_rtt) {
                            tp1->whoTo->RTO =
                                sctp_calculate_rto(stcb,
                                asoc, tp1->whoTo,
                                &tp1->sent_rcv_time,
                                sctp_align_safe_nocopy);
                            tp1->do_rtt = 0;
                        }
                    }
                    /*
                     * CMT: CUCv2 algorithm. From the
                     * cumack'd TSNs, for each TSN being
                     * acked for the first time, set the
                     * following variables for the
                     * corresp destination.
                     * new_pseudo_cumack will trigger a
                     * cwnd update.
                     * find_(rtx_)pseudo_cumack will
                     * trigger search for the next
                     * expected (rtx-)pseudo-cumack.
                     */
                    tp1->whoTo->new_pseudo_cumack = 1;
                    tp1->whoTo->find_pseudo_cumack = 1;
                    tp1->whoTo->find_rtx_pseudo_cumack = 1;

                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        /* sa_ignore NO_NULL_CHK */
                        sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
                    }
                }
                if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                    sctp_ucount_decr(asoc->sent_queue_retran_cnt);
                }
                if (tp1->rec.data.chunk_was_revoked) {
                    /* deflate the cwnd */
                    tp1->whoTo->cwnd -= tp1->book_size;
                    tp1->rec.data.chunk_was_revoked = 0;
                }
                tp1->sent = SCTP_DATAGRAM_ACKED;
                TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
                if (tp1->data) {
                    /* sa_ignore NO_NULL_CHK */
                    sctp_free_bufspace(stcb, asoc, tp1, 1);
                    sctp_m_freem(tp1->data);
                }
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
                    sctp_log_sack(asoc->last_acked_seq,
                        cumack,
                        tp1->rec.data.TSN_seq,
                        0,
                        0,
                        SCTP_LOG_FREE_SENT);
                }
                tp1->data = NULL;
                asoc->sent_queue_cnt--;
                sctp_free_a_chunk(stcb, tp1);
                tp1 = tp2;
            } else {
                break;
            }
        }
    }
    /* sa_ignore NO_NULL_CHK */
    if (stcb->sctp_socket) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        struct socket *so;

#endif
        SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
            /* sa_ignore NO_NULL_CHK */
            sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
        }
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        so = SCTP_INP_SO(stcb->sctp_ep);
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
            /* assoc was freed while we were unlocked */
            SCTP_SOCKET_UNLOCK(so, 1);
            return;
        }
#endif
        sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
    } else {
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
            sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
        }
    }

    /* JRS - Use the congestion control given in the CC module */
    if (asoc->last_acked_seq != cumack)
        asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);

    asoc->last_acked_seq = cumack;

    if (TAILQ_EMPTY(&asoc->sent_queue)) {
        /* nothing left in-flight */
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            net->flight_size = 0;
            net->partial_bytes_acked = 0;
        }
        asoc->total_flight = 0;
        asoc->total_flight_count = 0;
    }
    /* Fix up the a-p-a-p for future PR-SCTP sends */
    if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
        asoc->advanced_peer_ack_point = cumack;
    }
    /* ECN Nonce updates */
    if (asoc->ecn_nonce_allowed) {
        if (asoc->nonce_sum_check) {
            if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
                if (asoc->nonce_wait_for_ecne == 0) {
                    struct sctp_tmit_chunk *lchk;

                    lchk = TAILQ_FIRST(&asoc->send_queue);
                    asoc->nonce_wait_for_ecne = 1;
                    if (lchk) {
                        asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
                    } else {
                        asoc->nonce_wait_tsn = asoc->sending_seq;
                    }
                } else {
                    if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
                        (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
                        /*
                         * Misbehaving peer. We need
                         * to react to this guy
                         */
                        asoc->ecn_allowed = 0;
                        asoc->ecn_nonce_allowed = 0;
                    }
                }
            }
        } else {
            /* See if Resynchronization Possible */
            if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
                asoc->nonce_sum_check = 1;
                /*
                 * now we must calculate what the base is.
                 * We do this based on two things, we know
                 * the total's for all the segments
                 * gap-acked in the SACK (none), We also
                 * know the SACK's nonce sum, its in
                 * nonce_sum_flag. So we can build a truth
                 * table to back-calculate the new value of
                 * asoc->nonce_sum_expect_base:
                 *
                 * SACK-flag-Value         Seg-Sums Base
                 *         0                  0      0
                 *         1                  0      1
                 *         0                  1      1
                 *         1                  1      0
                 */
                asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
            }
        }
    }
	/* RWND update */
	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
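	/*
	 * A sketch of the update above: the window advertised in the SACK
	 * is discounted by everything we still count as in flight, plus a
	 * per-chunk overhead allowance (sctp_peer_chunk_oh) for chunks
	 * still on the sent queue, clamping at zero:
	 *
	 *   peers_rwnd = max(0, rwnd - total_flight
	 *                          - sent_queue_cnt * peer_chunk_oh)
	 *
	 * Anything below the sender-side SWS threshold is then forced to
	 * zero so we do not dribble out silly-window-sized packets.
	 */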
	/* Now assure a timer where data is queued at */
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			net->window_probe = 0;
			win_probe_recovered = 1;
			/*
			 * Find first chunk that was used with window probe
			 * and clear the sent
			 */
			/* sa_ignore FREED_MEMORY */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					/* move back to data send queue */
					sctp_window_probe_recovery(stcb, asoc, net, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			int to_ticks;

			if (net->RTO == 0) {
				to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
			} else {
				to_ticks = MSEC_TO_TICKS(net->RTO);
			}
			j++;
			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
			    sctp_timeout_handler, &net->rxt_timer);
		} else {
			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
			}
			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
				}
			}
		}
	}
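	/*
	 * Sketch of the retransmission-timer restart above: a fresh T3-rxt
	 * timer is armed per destination whenever that destination still
	 * has data in flight, using the smoothed RTO if one has been
	 * measured and the association's initial RTO otherwise:
	 *
	 *   to_ticks = MSEC_TO_TICKS(net->RTO ? net->RTO
	 *                                     : stcb->asoc.initial_rto);
	 *
	 * Destinations with nothing left in flight instead have any
	 * pending send / early-FR timers stopped.
	 */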
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen */
		sctp_fs_audit(asoc);
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		asoc->sent_queue_retran_cnt = 0;
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
				asoc->sent_queue_retran_cnt++;
			}
		}
		done_once = 1;
		goto again;
	}
	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->locked_on_sending)
		    ) {
			struct sctp_stream_queue_pending *sp;

			/*
			 * I may be in a state where we got all across.. but
			 * cannot write more due to a shutdown... we abort
			 * since the user did not indicate EOR in this case.
			 * The sp will be cleaned during free of the asoc.
			 */
			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
			    sctp_streamhead);
			if ((sp) && (sp->length == 0)) {
				/* Let cleanup code purge it */
				if (sp->msg_is_complete) {
					asoc->stream_queue_cnt--;
				} else {
					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					asoc->locked_on_sending = NULL;
					asoc->stream_queue_cnt--;
				}
			}
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				/* Need to abort here */
				struct mbuf *oper;

		abort_out_now:
				*abort_now = 1;
				/* XXX */
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
					    sizeof(uint32_t);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
			} else {
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				goto abort_out_now;
			}
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			sctp_send_shutdown_ack(stcb,
			    stcb->asoc.primary_destination);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
		    rwnd,
		    stcb->asoc.peers_rwnd,
		    stcb->asoc.total_flight,
		    stcb->asoc.total_output_queue_size);
	}
}
void
sctp_handle_sack(struct mbuf *m, int offset,
    struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
    struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
{
	struct sctp_association *asoc;
	struct sctp_sack *sack;
	struct sctp_tmit_chunk *tp1, *tp2;
	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
	    this_sack_lowest_newack;
	uint32_t sav_cum_ack;
	uint16_t num_seg, num_dup;
	uint16_t wake_him = 0;
	unsigned int sack_length;
	uint32_t send_s = 0;
	long j;
	int accum_moved = 0;
	int will_exit_fast_recovery = 0;
	uint32_t a_rwnd, old_rwnd;
	int win_probe_recovery = 0;
	int win_probe_recovered = 0;
	struct sctp_nets *net = NULL;
	int nonce_sum_flag, ecn_seg_sums = 0;
	int done_once;
	uint8_t reneged_all = 0;
	uint8_t cmt_dac_flag;
	/*
	 * we take any chance we can to service our queues since we cannot
	 * get awoken when the socket is read from :<
	 */
	/*
	 * Now perform the actual SACK handling: 1) Verify that it is not an
	 * old sack, if so discard. 2) If there is nothing left in the send
	 * queue (cum-ack is equal to last acked) then you have a duplicate
	 * too, update any rwnd change and verify no timers are running.
	 * Then return. 3) Process any new consecutive data, i.e. cum-ack
	 * moved; process these first and note that it moved. 4) Process any
	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
	 * sync up flightsizes and things, stop all timers and also check
	 * for shutdown_pending state. If so then go ahead and send off the
	 * shutdown. If in shutdown recv, send off the shutdown-ack and
	 * start that timer. Return. 9) Strike any non-acked things and do
	 * FR procedure if needed, being sure to set the FR flag. 10) Do
	 * pr-sctp procedures. 11) Apply any FR penalties. 12) Assure we
	 * will SACK if in shutdown_recv state.
	 */
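	/*
	 * A note on TSN comparisons used throughout this routine: every
	 * ordering test goes through compare_with_wrap(), which implements
	 * serial-number arithmetic so a cum-ack remains "newer" even after
	 * the 32-bit TSN space wraps past 0xffffffff.  A minimal
	 * stand-alone sketch of that helper (the real one lives in
	 * sctputil.c) would be:
	 */
#if 0
static int
example_tsn_after(uint32_t a, uint32_t b)
{
	/* non-zero iff 'a' logically follows 'b' modulo 2^32 */
	return (((a > b) && ((a - b) < (1U << 31))) ||
	    ((a < b) && ((b - a) > (1U << 31))));
}
#endif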
	SCTP_TCB_LOCK_ASSERT(stcb);
	sack = &ch->sack;
	/* CMT DAC algo */
	this_sack_lowest_newack = 0;
	j = 0;
	sack_length = (unsigned int)sack_len;
	/* ECN Nonce */
	SCTP_STAT_INCR(sctps_slowpath_sack);
	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
#ifdef SCTP_ASOCLOG_OF_TSNS
	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
	stcb->asoc.cumack_log_at++;
	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
		stcb->asoc.cumack_log_at = 0;
	}
#endif
	num_seg = ntohs(sack->num_gap_ack_blks);
	a_rwnd = rwnd;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
	}
	/* CMT DAC algo */
	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
	num_dup = ntohs(sack->num_dup_tsns);

	old_rwnd = stcb->asoc.peers_rwnd;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INDATA,
		    __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	asoc = &stcb->asoc;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
		sctp_log_sack(asoc->last_acked_seq,
		    cum_ack,
		    0,
		    num_seg,
		    num_dup,
		    SCTP_LOG_NEW_SACK);
	}
	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
		int off_to_dup, iii;
		uint32_t *dupdata, dblock;

		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
			    sizeof(uint32_t), (uint8_t *) & dblock);
			off_to_dup += sizeof(uint32_t);
			if (dupdata) {
				for (iii = 0; iii < num_dup; iii++) {
					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
					    sizeof(uint32_t), (uint8_t *) & dblock);
					if (dupdata == NULL)
						break;
					off_to_dup += sizeof(uint32_t);
				}
			}
		} else {
			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
			    off_to_dup, num_dup, sack_length, num_seg);
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
		/* reality check */
		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
			tp1 = TAILQ_LAST(&asoc->sent_queue,
			    sctpchunk_listhead);
			send_s = tp1->rec.data.TSN_seq + 1;
		} else {
			send_s = asoc->sending_seq;
		}
		if (cum_ack == send_s ||
		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
#ifndef INVARIANTS
			struct mbuf *oper;

#endif
#ifdef INVARIANTS
	hopeless_peer:
			panic("Impossible sack 1");
#else
			/*
			 * no way, we have not even sent this TSN out yet.
			 * Peer is hopelessly messed up with us.
			 */
	hopeless_peer:
			*abort_now = 1;
			/* XXX */
			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
			}
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
			return;
#endif
		}
	}
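	/*
	 * For reference, the operational error built above lays out on the
	 * wire as a standard error-cause TLV followed by one 32-bit detail
	 * word identifying where in this file the abort was raised
	 * (sketch):
	 *
	 *   struct sctp_paramhdr { param_type; param_length; }  4 bytes
	 *   uint32_t detail = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
	 *
	 * giving param_length == 8 in total.
	 */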
	/**********************/
	/* 1) check the range */
	/**********************/
	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
		/* acking something behind */
		return;
	}
	sav_cum_ack = asoc->last_acked_seq;
	/* update the Rwnd of the peer */
	if (TAILQ_EMPTY(&asoc->sent_queue) &&
	    TAILQ_EMPTY(&asoc->send_queue) &&
	    (asoc->stream_queue_cnt == 0)
	    ) {
		/* nothing left on send/sent and strmq */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
			    asoc->peers_rwnd, 0, 0, a_rwnd);
		}
		asoc->peers_rwnd = a_rwnd;
		if (asoc->sent_queue_retran_cnt) {
			asoc->sent_queue_retran_cnt = 0;
		}
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		/* stop any timers */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
				}
			}
			net->partial_bytes_acked = 0;
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		return;
	}
	/*
	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
	 * things. The total byte count acked is tracked in netAckSz AND
	 * netAck2 is used to track the total bytes acked that are
	 * unambiguous and were never retransmitted. We track these on a
	 * per destination address basis.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		net->prev_cwnd = net->cwnd;
		net->net_ack = 0;
		net->net_ack2 = 0;

		/*
		 * CMT: Reset CUC and Fast recovery algo variables before
		 * SACK processing
		 */
		net->new_pseudo_cumack = 0;
		net->will_exit_fast_recovery = 0;
	}
	/* process the new consecutive TSN first */
	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	while (tp1) {
		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
		    MAX_TSN) ||
		    last_tsn == tp1->rec.data.TSN_seq) {
			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
				/*
				 * ECN Nonce: Add the nonce to the sender's
				 * nonce sum
				 */
				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
				accum_moved = 1;
				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
					/*
					 * If it is less than ACKED, it is
					 * now no-longer in flight. Higher
					 * values may occur during marking
					 */
					if ((tp1->whoTo->dest_state &
					    SCTP_ADDR_UNCONFIRMED) &&
					    (tp1->snd_count < 2)) {
						/*
						 * If there was no retran
						 * and the address is
						 * un-confirmed and we sent
						 * there and are now
						 * sacked.. its confirmed,
						 * mark it so.
						 */
						tp1->whoTo->dest_state &=
						    ~SCTP_ADDR_UNCONFIRMED;
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uintptr_t) tp1->whoTo,
							    tp1->rec.data.TSN_seq);
						}
						sctp_flight_size_decrease(tp1);
						sctp_total_flight_decrease(stcb, tp1);
					}
					tp1->whoTo->net_ack += tp1->send_size;

					/* CMT SFR and DAC algos */
					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
					tp1->whoTo->saw_newack = 1;

					if (tp1->snd_count < 2) {
						/*
						 * True non-retransmitted
						 * chunk
						 */
						tp1->whoTo->net_ack2 +=
						    tp1->send_size;

						/* update RTO too? */
						if (tp1->do_rtt) {
							tp1->whoTo->RTO =
							    sctp_calculate_rto(stcb,
							    asoc, tp1->whoTo,
							    &tp1->sent_rcv_time,
							    sctp_align_safe_nocopy);
							tp1->do_rtt = 0;
						}
					}
					/*
					 * CMT: CUCv2 algorithm. From the
					 * cumack'd TSNs, for each TSN being
					 * acked for the first time, set the
					 * following variables for the
					 * corresponding destination.
					 * new_pseudo_cumack will trigger a
					 * cwnd update.
					 * find_(rtx_)pseudo_cumack will
					 * trigger search for the next
					 * expected (rtx-)pseudo-cumack.
					 */
					tp1->whoTo->new_pseudo_cumack = 1;
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;

					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
						sctp_log_sack(asoc->last_acked_seq,
						    cum_ack,
						    tp1->rec.data.TSN_seq,
						    0, 0,
						    SCTP_LOG_TSN_ACKED);
					}
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
					}
				}
				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
					sctp_audit_log(0xB3,
					    (asoc->sent_queue_retran_cnt & 0x000000ff));
#endif
				}
				if (tp1->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					tp1->whoTo->cwnd -= tp1->book_size;
					tp1->rec.data.chunk_was_revoked = 0;
				}
				tp1->sent = SCTP_DATAGRAM_ACKED;
			}
		} else {
			break;
		}
		tp1 = TAILQ_NEXT(tp1, sctp_next);
	}
	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
	/* always set this up to cum-ack */
	asoc->this_sack_highest_gap = last_tsn;

	/* Move offset up to point to gaps/dups */
	offset += sizeof(struct sctp_sack_chunk);
	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
		/* skip corrupt segments */
		goto skip_segments;
	}
	if (num_seg > 0) {
		/*
		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
		 * to be greater than the cumack. Also reset saw_newack to 0
		 * for all dests.
		 */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->saw_newack = 0;
			net->this_sack_highest_newack = last_tsn;
		}

		/*
		 * thisSackHighestGap will increase while handling NEW
		 * segments this_sack_highest_newack will increase while
		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
		 * used for CMT DAC algo. saw_newack will also change.
		 */
		sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
		    num_seg, &ecn_seg_sums);
	}
	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
		/*
		 * validate the biggest_tsn_acked in the gap acks if
		 * strict adherence is wanted.
		 */
		if ((biggest_tsn_acked == send_s) ||
		    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
			/*
			 * peer is either confused or we are under
			 * attack. We must abort.
			 */
			goto hopeless_peer;
		}
	}
skip_segments:
	/*******************************************/
	/* cancel ALL T3-send timer if accum moved */
	/*******************************************/
	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->new_pseudo_cumack)
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
		}
	} else {
		if (accum_moved) {
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
			}
		}
	}
	/********************************************/
	/* drop the acked chunks from the sendqueue */
	/********************************************/
	asoc->last_acked_seq = cum_ack;

	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	if (tp1 == NULL)
		goto done_with_it;
	do {
		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
		    MAX_TSN)) {
			break;
		}
		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* no more sent on list */
			printf("Warning, tp1->sent == %d and it's now acked?\n",
			    tp1->sent);
		}
		tp2 = TAILQ_NEXT(tp1, sctp_next);
		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
		if (tp1->pr_sctp_on) {
			if (asoc->pr_sctp_cnt != 0)
				asoc->pr_sctp_cnt--;
		}
		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
		    (asoc->total_flight > 0)) {
#ifdef INVARIANTS
			panic("Warning flight size is positive and should be 0");
#else
			SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
			    asoc->total_flight);
#endif
			asoc->total_flight = 0;
		}
		if (tp1->data) {
			/* sa_ignore NO_NULL_CHK */
			sctp_free_bufspace(stcb, asoc, tp1, 1);
			sctp_m_freem(tp1->data);
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				asoc->sent_queue_cnt_removeable--;
			}
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
			sctp_log_sack(asoc->last_acked_seq,
			    cum_ack,
			    tp1->rec.data.TSN_seq,
			    0, 0,
			    SCTP_LOG_FREE_SENT);
		}
		tp1->data = NULL;
		asoc->sent_queue_cnt--;
		sctp_free_a_chunk(stcb, tp1);
		wake_him++;
		tp1 = tp2;
	} while (tp1 != NULL);

done_with_it:
	/* sa_ignore NO_NULL_CHK */
	if ((wake_him) && (stcb->sctp_socket)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

#endif
		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
		}
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
		}
	}
	if (asoc->fast_retran_loss_recovery && accum_moved) {
		if (compare_with_wrap(asoc->last_acked_seq,
		    asoc->fast_recovery_tsn, MAX_TSN) ||
		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
			/* Setup so we will exit RFC2582 fast recovery */
			will_exit_fast_recovery = 1;
		}
	}
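	/*
	 * In other words (a sketch of the RFC 2582-style exit test above):
	 * fast recovery was entered recording fast_recovery_tsn, the
	 * highest TSN outstanding at that moment.  Once the cumulative
	 * ack reaches or passes that TSN, the loss episode that triggered
	 * recovery is fully repaired and we may leave recovery:
	 *
	 *   if (last_acked_seq >= fast_recovery_tsn)   [serial arithmetic]
	 *           will_exit_fast_recovery = 1;
	 */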
	/*
	 * Check for revoked fragments:
	 *
	 * if Previous sack - Had no frags then we can't have any revoked if
	 * Previous sack - Had frag's then - If we now have frags aka
	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
	 * some of them. else - The peer revoked all ACKED fragments, since
	 * we had some before and now we have NONE.
	 */

	if (num_seg)
		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
	else if (asoc->saw_sack_with_frags) {
		int cnt_revoked = 0;

		tp1 = TAILQ_FIRST(&asoc->sent_queue);
		if (tp1 != NULL) {
			/* Peer revoked all dg's marked or acked */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
					tp1->sent = SCTP_DATAGRAM_SENT;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
						    tp1->whoTo->flight_size,
						    tp1->book_size,
						    (uintptr_t) tp1->whoTo,
						    tp1->rec.data.TSN_seq);
					}
					sctp_flight_size_increase(tp1);
					sctp_total_flight_increase(stcb, tp1);
					tp1->rec.data.chunk_was_revoked = 1;
					/*
					 * To ensure that this increase in
					 * flightsize, which is artificial,
					 * does not throttle the sender, we
					 * also increase the cwnd
					 * artificially.
					 */
					tp1->whoTo->cwnd += tp1->book_size;
					cnt_revoked++;
				}
			}
			if (cnt_revoked) {
				reneged_all = 1;
			}
		}
		asoc->saw_sack_with_frags = 0;
	}
	if (num_seg)
		asoc->saw_sack_with_frags = 1;
	else
		asoc->saw_sack_with_frags = 0;
	/* JRS - Use the congestion control given in the CC module */
	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);

	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left in-flight */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			/* stop all timers */
			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
				}
			}
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
			net->flight_size = 0;
			net->partial_bytes_acked = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
	}
	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
			    asoc->peers_rwnd, 0, 0, a_rwnd);
		}
		asoc->peers_rwnd = a_rwnd;
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->locked_on_sending)
		    ) {
			struct sctp_stream_queue_pending *sp;

			/*
			 * I may be in a state where we got all across.. but
			 * cannot write more due to a shutdown... we abort
			 * since the user did not indicate EOR in this case.
			 */
			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
			    sctp_streamhead);
			if ((sp) && (sp->length == 0)) {
				asoc->locked_on_sending = NULL;
				if (sp->msg_is_complete) {
					asoc->stream_queue_cnt--;
				} else {
					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					asoc->stream_queue_cnt--;
				}
			}
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				/* Need to abort here */
				struct mbuf *oper;

		abort_out_now:
				*abort_now = 1;
				/* XXX */
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
					    sizeof(uint32_t);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
				return;
			} else {
				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
				sctp_stop_timers_for_shutdown(stcb);
				sctp_send_shutdown(stcb,
				    stcb->asoc.primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				goto abort_out_now;
			}
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			sctp_send_shutdown_ack(stcb,
			    stcb->asoc.primary_destination);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
	}
	/*
	 * Now here we are going to recycle net_ack for a different use...
	 * HEADS UP.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		net->net_ack = 0;
	}

	/*
	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
	 * automatically ensure that.
	 */
	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
		this_sack_lowest_newack = cum_ack;
	}
	sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
	    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
	/*********************************************/
	/* Here we perform PR-SCTP procedures        */
	/* (section 4.2)                             */
	/*********************************************/
	/* C1. update advancedPeerAckPoint */
	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
		asoc->advanced_peer_ack_point = cum_ack;
	}
	/* C2. try to further move advancedPeerAckPoint ahead */
	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
		    MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE
			 * stuff is put into SCTP for cross checking.
			 */
			send_forward_tsn(stcb, asoc);

			/*
			 * ECN Nonce: Disable Nonce Sum check when FWD TSN
			 * is sent and store resync tsn
			 */
			asoc->nonce_sum_check = 0;
			asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
		}
		if (lchk) {
			/* Assure a timer is up */
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			    stcb->sctp_ep, stcb, lchk->whoTo);
		}
	}
	/* JRS - Use the congestion control given in the CC module */
	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);

	/******************************************************************
	 * Here we do the stuff with ECN Nonce checking.
	 * We basically check to see if the nonce sum flag was incorrect
	 * or if resynchronization needs to be done. Also if we catch a
	 * misbehaving receiver we give him the kick.
	 ******************************************************************/
	if (asoc->ecn_nonce_allowed) {
		if (asoc->nonce_sum_check) {
			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
				if (asoc->nonce_wait_for_ecne == 0) {
					struct sctp_tmit_chunk *lchk;

					lchk = TAILQ_FIRST(&asoc->send_queue);
					asoc->nonce_wait_for_ecne = 1;
					if (lchk) {
						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
					} else {
						asoc->nonce_wait_tsn = asoc->sending_seq;
					}
				} else {
					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
						/*
						 * Misbehaving peer. We need
						 * to react to this guy.
						 */
						asoc->ecn_allowed = 0;
						asoc->ecn_nonce_allowed = 0;
					}
				}
			}
		} else {
			/* See if Resynchronization Possible */
			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
				asoc->nonce_sum_check = 1;
				/*
				 * Now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments
				 * gap-acked in the SACK, stored in
				 * ecn_seg_sums, and we know the SACK's
				 * nonce sum, in nonce_sum_flag. So we can
				 * build a truth table to back-calculate
				 * the new value of
				 * asoc->nonce_sum_expect_base:
				 *
				 * SACK-flag-Value  Seg-Sums  Base
				 *        0            0       0
				 *        1            0       1
				 *        0            1       1
				 *        1            1       0
				 */
				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
			}
		}
	}
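	/*
	 * A sketch of that back-calculation, assuming the plain XOR
	 * semantics described by the truth table above: the expected base
	 * is whatever single bit, when XOR-ed with the parity of the
	 * gap-acked segment nonces, reproduces the flag the peer reported.
	 */
#if 0
static uint8_t
example_nonce_base(uint8_t sack_flag, uint8_t seg_sums)
{
	/* XOR recovers the base:  base ^ seg_sums == sack_flag */
	return ((seg_sums ^ sack_flag) & SCTP_SACK_NONCE_SUM);
}
#endif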
	/* Now are we exiting loss recovery ? */
	if (will_exit_fast_recovery) {
		/* Ok, we must exit fast recovery */
		asoc->fast_retran_loss_recovery = 0;
	}
	if ((asoc->sat_t3_loss_recovery) &&
	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
	    MAX_TSN) ||
	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
		/* end satellite t3 loss recovery */
		asoc->sat_t3_loss_recovery = 0;
	}
	/*
	 * CMT Fast recovery
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (net->will_exit_fast_recovery) {
			/* Ok, we must exit fast recovery */
			net->fast_retran_loss_recovery = 0;
		}
	}
	/* Adjust and set the new rwnd value */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
	}
	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
	/*
	 * Now we must setup so we have a timer up for anyone with
	 * outstanding data.
	 */
	done_once = 0;
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			net->window_probe = 0;
			win_probe_recovered = 1;
			/*
			 * Find first chunk that was used with
			 * window probe and clear the event. Put
			 * it back into the send queue as if it has
			 * not been sent.
			 */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					sctp_window_probe_recovery(stcb, asoc, net, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			j++;
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			    stcb->sctp_ep, stcb, net);
		} else {
			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
			}
			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
				}
			}
		}
	}
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen */
		sctp_fs_audit(asoc);
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
		asoc->sent_queue_retran_cnt = 0;
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
				asoc->sent_queue_retran_cnt++;
			}
		}
		done_once = 1;
		goto again;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
		    a_rwnd,
		    stcb->asoc.peers_rwnd,
		    stcb->asoc.total_flight,
		    stcb->asoc.total_output_queue_size);
	}
}
void
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
    struct sctp_nets *netp, int *abort_flag)
{
	/* Copy cum-ack */
	uint32_t cum_ack, a_rwnd;

	cum_ack = ntohl(cp->cumulative_tsn_ack);
	/* Arrange so a_rwnd does NOT change */
	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;

	/* Now call the express sack handling */
	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
}
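/*
 * Why a_rwnd is held constant above (sketch): a SHUTDOWN chunk carries a
 * cumulative TSN ack but no window advertisement, so one is synthesized.
 * The express handler recomputes
 *
 *   peers_rwnd = a_rwnd - total_flight'
 *
 * where total_flight' is the flight remaining after the ack is applied;
 * seeding a_rwnd with peers_rwnd + total_flight therefore leaves the
 * peer-window estimate unchanged apart from the newly acked bytes.
 */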
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *ctl, *nctl;
	struct sctp_association *asoc;
	uint16_t tt;

	asoc = &stcb->asoc;
	tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything prior to and including the stream no that
	 * came in
	 */
	ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
		    (tt == ctl->sinfo_ssn)) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* no more delivery now. */
			break;
		}
		ctl = nctl;
	}
	/*
	 * now we must deliver things in queue the normal way if any are
	 * now ready.
	 */
	tt = strmin->last_sequence_delivered + 1;
	ctl = TAILQ_FIRST(&strmin->inqueue);
	while (ctl) {
		nctl = TAILQ_NEXT(ctl, next);
		if (tt == ctl->sinfo_ssn) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			strmin->last_sequence_delivered = ctl->sinfo_ssn;
			if (stcb->sctp_socket) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    ctl,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
			}
			tt = strmin->last_sequence_delivered + 1;
			ctl = nctl;
		} else {
			break;
		}
	}
}
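/*
 * Stream sequence numbers are 16 bits wide, so the deliverability tests
 * above use compare_with_wrap() with MAX_SEQ (0xffff) rather than
 * MAX_TSN.  A stand-alone sketch of the 16-bit variant:
 */
#if 0
static int
example_ssn_after(uint16_t a, uint16_t b)
{
	/* non-zero iff 'a' logically follows 'b' modulo 2^16 */
	return (((a > b) && ((uint16_t)(a - b) < (1U << 15))) ||
	    ((a < b) && ((uint16_t)(b - a) > (1U << 15))));
}
#endif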
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
{
	/*
	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
	 * forward TSN, when the SACK comes back that acknowledges the
	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
	 * get quite tricky since we may have sent more data intervening
	 * and must carefully account for what the SACK says on the nonce
	 * and any gaps that are reported. This work will NOT be done here,
	 * but I note it here since it is really related to PR-SCTP and
	 * FWD-TSN.
	 */
	/* The pr-sctp fwd tsn */
	/*
	 * here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *at;

	cumack_set_flag = 0;
	asoc = &stcb->asoc;
	cnt_gone = 0;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == new_cum_tsn) {
		/* Already got there ... */
		return;
	}
	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
	    MAX_TSN)) {
		asoc->highest_tsn_inside_map = new_cum_tsn;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	}
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
	    MAX_TSN)) ||
	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
	} else {
		/* try to prevent underflow here */
		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
	}
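	/*
	 * Gap-computation sketch: the mapping array covers TSNs starting
	 * at mapping_array_base_tsn, so the bit index for the new cum-TSN
	 * is simply the distance between the two, taken modulo 2^32 so a
	 * wrapped new_cum_tsn does not underflow.  For example:
	 *
	 *   base = 0xfffffffe, new = 0x00000001
	 *   gap  = new + (MAX_TSN - base) + 1 = 3
	 */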
	if (gap >= m_size) {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *oper;

			/*
			 * out of range (of single byte chunks in the rwnd I
			 * give out). This must be an attacker.
			 */
			*abort_flag = 1;
			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    (sizeof(uint32_t) * 3);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
				ippp++;
				*ippp = asoc->highest_tsn_inside_map;
				ippp++;
				*ippp = new_cum_tsn;
			}
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb,
			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		cumack_set_flag = 1;
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if ((compare_with_wrap(((uint32_t) asoc->cumulative_tsn + gap), asoc->highest_tsn_inside_map, MAX_TSN)) ||
		    (((uint32_t) asoc->cumulative_tsn + gap) == asoc->highest_tsn_inside_map)) {
			asoc->highest_tsn_inside_map = (uint32_t) asoc->cumulative_tsn + gap;
		}
		for (i = 0; i <= gap; i++) {
			SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
		}
		/*
		 * Now after marking all, slide thing forward but no sack
		 * please.
		 */
		sctp_sack_check(stcb, 0, 0, abort_flag);
		if (*abort_flag)
			return;
	}
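	/*
	 * Sketch of the marking step above: bit i of mapping_array stands
	 * for TSN mapping_array_base_tsn + i, so setting bits 0..gap
	 * declares every TSN up to the forward point as received, after
	 * which the normal sack-check machinery slides the map forward.
	 */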
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/
	/*
	 * First service it if pd-api is up, just in case we can progress it
	 * forward
	 */
	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* For each one on here see if we need to toss it */
		/*
		 * For now large messages held on the reasmqueue that are
		 * complete will be tossed too. We could in theory do more
		 * work to spin through and stop after dumping one msg aka
		 * seeing the start of a new msg at the head, and call the
		 * delivery function... to see if it can be delivered... But
		 * for now we just dump everything on the queue.
		 */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			at = TAILQ_NEXT(chk, sctp_next);
			if (compare_with_wrap(asoc->cumulative_tsn,
			    chk->rec.data.TSN_seq, MAX_TSN) ||
			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
				/* It needs to be tossed */
				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
				if (compare_with_wrap(chk->rec.data.TSN_seq,
				    asoc->tsn_last_delivered, MAX_TSN)) {
					asoc->tsn_last_delivered =
					    chk->rec.data.TSN_seq;
					asoc->str_of_pdapi =
					    chk->rec.data.stream_number;
					asoc->ssn_of_pdapi =
					    chk->rec.data.stream_seq;
					asoc->fragment_flags =
					    chk->rec.data.rcv_flags;
				}
				asoc->size_on_reasm_queue -= chk->send_size;
				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
				cnt_gone++;
				/* Clear up any stream problem */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    (compare_with_wrap(chk->rec.data.stream_seq,
				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
				    MAX_SEQ))) {
					/*
					 * We must dump forward this streams
					 * sequence number if the chunk is
					 * not unordered that is being
					 * skipped. There is a chance that
					 * if the peer does not include the
					 * last fragment in its FWD-TSN we
					 * WILL have a problem here since
					 * you would have a partial chunk in
					 * queue that may not be
					 * deliverable. Also if a Partial
					 * delivery API has started the user
					 * may get a partial chunk. The next
					 * read returning a new chunk...
					 * really ugly but I see no way
					 * around it! Maybe a notify??
					 */
					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
					    chk->rec.data.stream_seq;
				}
				sctp_m_freem(chk->data);
				chk->data = NULL;
				sctp_free_a_chunk(stcb, chk);
			} else {
				/*
				 * Ok we have gone beyond the end of the
				 * fwd-tsn's mark. Some checks...
				 */
				if ((asoc->fragmented_delivery_inprogress) &&
				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
					uint32_t str_seq;

					/*
					 * Special case PD-API is up and
					 * what we fwd-tsn' over includes
					 * one that had the LAST_FRAG. We no
					 * longer need to do the PD-API.
					 */
					asoc->fragmented_delivery_inprogress = 0;

					str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
				}
				break;
			}
			chk = at;
		}
	}
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * Ok we removed cnt_gone chunks in the PD-API queue that
		 * were being delivered. So now we must turn off the flag.
		 */
		uint32_t str_seq;

		str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
		asoc->fragmented_delivery_inprogress = 0;
	}
	/*************************************************************/
	/* 3. Update the PR-stream re-ordering queues                */
	/*************************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		struct sctp_strseq *stseq, strseqbuf;

		offset += sizeof(*fwd);

		num_str = fwd_sz / sizeof(struct sctp_strseq);
		for (i = 0; i < num_str; i++) {
			uint16_t st;

			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
			    sizeof(struct sctp_strseq),
			    (uint8_t *) & strseqbuf);
			offset += sizeof(struct sctp_strseq);
			if (stseq == NULL) {
				break;
			}
			/* Convert to host byte order */
			st = ntohs(stseq->stream);
			stseq->stream = st;
			st = ntohs(stseq->sequence);
			stseq->sequence = st;
			/* now process */
			if (stseq->stream >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			strm = &asoc->strmin[stseq->stream];
			if (compare_with_wrap(stseq->sequence,
			    strm->last_sequence_delivered, MAX_SEQ)) {
				/* Update the sequence number */
				strm->last_sequence_delivered =
				    stseq->sequence;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
	}
	if (TAILQ_FIRST(&asoc->reasmqueue)) {
		/* now lets kick out and check for more fragmented delivery */
		/* sa_ignore NO_NULL_CHK */
		sctp_deliver_reasm_check(stcb, &stcb->asoc);
	}
}