/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
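/* A ulpq keeps three skb lists: 'reasm' holds fragments awaiting
 * reassembly (sorted by TSN), 'reasm_uo' holds fragments of unordered
 * messages when stream interleaving is in use, and 'lobby' holds
 * messages awaiting in-stream ordering (sorted by stream ID, then SSN).
 */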
/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->reasm_uo);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}
/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}
/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}
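/* Note on the return value: sctp_ulpq_tail_data() returns 1 when an
 * event carrying MSG_EOR (a complete message) was handed to the ULP,
 * 0 when the data was queued without completing a message, and a
 * negative errno if the event could not be allocated.
 */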
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if (event && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}
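/* Partial delivery bookkeeping: sp->pd_mode is an atomic count of the
 * associations on a socket that are currently in partial delivery,
 * while ulpq->pd_mode flags this particular association.
 */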
/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}
/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}
/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
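/* Deliver an event towards the socket.  The event lands on the socket's
 * receive queue unless partial delivery is in progress, in which case it
 * may be parked in the socket's pd_lobby instead.  Returns 1 if the event
 * was queued for the user, 0 if it was dropped and freed.
 */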
/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else if (ulpq->pd_mode) {
		/* If the association is in partial delivery, we
		 * need to finish delivering the partially processed
		 * packet before passing any other data.  This is
		 * because we don't truly support stream interleaving.
		 */
		if ((event->msg_flags & MSG_NOTIFICATION) ||
		    (SCTP_DATA_NOT_FRAG ==
			    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
			queue = &sp->pd_lobby;
		else {
			clear_pd = event->msg_flags & MSG_EOR;
			queue = &sk->sk_receive_queue;
		}
	} else {
		/* If fragment interleave is enabled, we
		 * can queue this to the receive queue instead
		 * of the lobby.
		 */
		if (sp->frag_interleave)
			queue = &sk->sk_receive_queue;
		else
			queue = &sp->pd_lobby;
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		skb_queue_splice_tail_init(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue.  The skb's may be non-linear if the
 * sctp payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}
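/* The single queue walk below serves two purposes: it looks for a run of
 * fragments that completes a datagram, and it simultaneously measures the
 * sequential data sitting at the head of the queue so that partial
 * delivery can be triggered once the SCTP_PARTIAL_DELIVERY_POINT is
 * crossed.
 */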
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value.  It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first, pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}
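/* While the association is in partial delivery, only further fragments of
 * the message being delivered may go up, and only once they fall at or
 * below the cumulative TSN ACK point; hence the split below between full
 * reassembly and sctp_ulpq_retrieve_partial().
 */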
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue
 * that need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	return retval;
}
/*
 * Flush out stale fragments from the reassembly queue when a FORWARD
 * TSN chunk arrives.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}
/*
 * Drain the reassembly queue.  If we have just cleared partial delivery,
 * it is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if (event->msg_flags & MSG_EOR) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *stream;
	__u16 sid, csid, cssn;

	sid = event->stream;
	stream = &ulpq->asoc->stream;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(stream, in, sid))
			break;

		/* Found it, so mark in the stream. */
		sctp_ssn_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}
/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *stream;

	/* Check if this message needs ordering.  */
	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	stream = &ulpq->asoc->stream;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(stream, in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *stream;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	stream = &ulpq->asoc->stream;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
			sctp_ssn_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}
/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *stream;

	/* Note: The stream ID must be verified before this routine.  */
	stream = &ulpq->asoc->stream;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(stream, in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
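/* Renege events from 'list', newest (highest TSN) first, until 'needed'
 * bytes have been freed or the Cumulative TSN ACK Point is reached.
 * Returns the number of bytes actually freed.
 */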
__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
			    __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
		}
	}
}
/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_data_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (freed >= needed) {
		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/*
		 * Enter partial delivery if chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;
	struct sctp_sock *sp;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      0, 0, 0, gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}