/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate sctp stream queue/scheduling.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>

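/* Allocate an empty I-DATA chunk for one fragment of a user message.
 * Only the stream id is filled in here; PPID, FSN and MID are stamped
 * later by sctp_chunk_assign_mid() once the whole message is fragmented.
 */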
static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}

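/* Stamp every fragment of an outgoing message: the first fragment carries
 * the PPID, the following ones consecutive FSNs, and all of them the
 * stream's current MID.  Unordered messages use the unordered MID counter,
 * and the counter is only advanced when the last fragment is stamped.
 */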
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}

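/* A DATA chunk is only acceptable if its SSN has not already been passed
 * on that stream; unordered chunks always pass.  sctp_validate_idata()
 * below applies the same check to I-DATA chunks using the 32-bit MID.
 */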
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	const struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}

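/* Insert the event into the interleaved reassembly queue, kept sorted by
 * stream id, then MID, then FSN, so fragments of the same message end up
 * adjacent and in order.
 */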
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

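/* While partial delivery is active on the event's stream, pull the next
 * run of in-sequence fragments (starting at sin->fsn) off the reassembly
 * queue and hand them up; MSG_EOR is set once the last fragment is taken.
 */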
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			} else {
				goto out;
			}
			break;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

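/* Reassembly entry point for ordered I-DATA: unfragmented events are
 * complete as they are, fragments are stored in the reassembly queue and,
 * if partial delivery is in progress for this message, the partial
 * retrieval path is tried before attempting full reassembly.
 */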
static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(ulpq->asoc, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}

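/* Park a reassembled but out-of-order message in the lobby, kept sorted
 * by stream id and MID, until the messages in front of it arrive.
 */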
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream)
			break;

		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid))
			break;
	}

	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}

static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}

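/* Deliver an event (or the event list hanging off skb->prev) to the
 * socket receive queue and wake the reader, unless the socket is shut
 * down for receive or the event type is not subscribed, in which case
 * it is freed instead.
 */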
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sctp_ulpevent *event)
{
	struct sk_buff *skb = sctp_event2skb(event);
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *skb_list;

	skb_list = (struct sk_buff_head *)skb->prev;

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			} else {
				goto out;
			}
			break;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(ulpq->asoc, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}

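/* Receive path for an I-DATA chunk: build an ulpevent, run it through
 * reassembly (and, for ordered data, through MID ordering), then deliver
 * whatever became complete.  Returns 1 when the delivered event carried
 * MSG_EOR.
 */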
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event && event->msg_flags & MSG_EOR) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, event);
	}

	return event_eor;
}

static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}

static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}
}

static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(struct sctp_idata_chunk);
	} else {
		needed = SCTP_DEFAULT_MAXWINDOW;
	}

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (chunk && freed >= needed)
		if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
			sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}

static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
					&sctp_sk(sk)->subscribe))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sctp_sk(sk)->data_ready_signalled) {
			sctp_sk(sk)->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, event);
	}
}

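/* Abort partial delivery on every incoming stream, ordered and unordered,
 * and flush the whole ulp queue; only used when all pending data has to
 * be cleaned out.
 */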
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = &stream->in[sid];
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

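/* Build an I-FORWARD-TSN chunk: advance the advanced peer ack point over
 * abandoned chunks and collect one (stream, flags, mid) skip entry per
 * stream, with the U bit marking unordered messages.
 */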
#define SCTP_FTSN_U_BIT	0x1
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);
	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}

#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))

static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack point ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}

static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack point ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* abort only when it's for all data */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(ulpq->asoc, sid);
	struct sctp_stream *stream = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len = sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag = sctp_make_datafrag_empty,
	.assign_number = sctp_chunk_assign_ssn,
	.validate_data = sctp_validate_data,
	.ulpevent_data = sctp_ulpq_tail_data,
	.enqueue_event = sctp_ulpq_tail_event,
	.renege_events = sctp_ulpq_renege,
	.start_pd = sctp_ulpq_partial_delivery,
	.abort_pd = sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn = sctp_generate_fwdtsn,
	.validate_ftsn = sctp_validate_fwdtsn,
	.report_ftsn = sctp_report_fwdtsn,
	.handle_ftsn = sctp_handle_fwdtsn,
};

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len = sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag = sctp_make_idatafrag_empty,
	.assign_number = sctp_chunk_assign_mid,
	.validate_data = sctp_validate_idata,
	.ulpevent_data = sctp_ulpevent_idata,
	.enqueue_event = sctp_enqueue_event,
	.renege_events = sctp_renege_events,
	.start_pd = sctp_intl_start_pd,
	.abort_pd = sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn = sctp_generate_iftsn,
	.validate_ftsn = sctp_validate_iftsn,
	.report_ftsn = sctp_report_iftsn,
	.handle_ftsn = sctp_handle_iftsn,
};

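/* Select the operations table when the association is set up: the I-DATA
 * based table if interleaving was negotiated, the classic DATA one
 * otherwise.
 */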
void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
				       : &sctp_stream_interleave_0;
}