/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);

/* 1st Level Abstractions */
/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;
	ulpq->malloced = 0;

	return ulpq;
}
/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}
/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if (event && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}
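/* Worked example of the data path above (illustrative values, not
 * from the source): a DATA chunk carrying a middle fragment becomes
 * an event, is parked on the reasm queue by sctp_ulpq_reasm() (which
 * returns NULL), and nothing reaches ordering or the socket.  When
 * the last fragment arrives, reassembly returns one event with
 * MSG_EOR set, sctp_ulpq_order() may batch it with other messages
 * that just became deliverable on 'temp', and the whole batch is
 * handed to sctp_ulpq_tail_event().
 */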
/* Add a new event for propagation to the ULP.  */
/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk)
{
	struct sctp_sock *sp = sctp_sk(sk);

	sp->pd_mode = 0;
	if (!skb_queue_empty(&sp->pd_lobby)) {
		struct list_head *list;
		sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
		list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
		INIT_LIST_HEAD(list);
		return 1;
	}
	return 0;
}
/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	return sctp_clear_pd(ulpq->asoc->base.sk);
}
/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_ is
	 * the association that caused the partial delivery.
	 */
	if (!sctp_sk(sk)->pd_mode) {
		queue = &sk->sk_receive_queue;
	} else if (ulpq->pd_mode) {
		/* If the association is in partial delivery, we
		 * need to finish delivering the partially processed
		 * packet before passing any other data.  This is
		 * because we don't truly support stream interleaving.
		 */
		if ((event->msg_flags & MSG_NOTIFICATION) ||
		    (SCTP_DATA_NOT_FRAG ==
			    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
			queue = &sctp_sk(sk)->pd_lobby;
		else {
			clear_pd = event->msg_flags & MSG_EOR;
			queue = &sk->sk_receive_queue;
		}
	} else
		queue = &sctp_sk(sk)->pd_lobby;

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
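/* Queue selection above, summarized (derived from the code, for
 * reference):
 *
 *   socket pd_mode  assoc pd_mode  event type                  queue
 *   --------------  -------------  --------------------------  ----------------
 *   off             -              any                         sk_receive_queue
 *   on              on             notification/unfragmented   pd_lobby
 *   on              on             fragment of the pd message  sk_receive_queue
 *   on              off            any                         pd_lobby
 */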
/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}
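/* For example (illustrative TSNs): with the reasm queue holding
 * events for TSNs 100, 101 and 103, storing TSN 102 falls through
 * both fast paths (the queue is not empty, and the tail TSN 103 is
 * not less than 102); the walk then stops at 103 and 102 is inserted
 * before it, keeping the queue sorted by TSN.
 */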
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, then free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}
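/* Note on the splice above: the 2nd..last fragments are chained onto
 * f_frag's frag_list rather than copied; only a cloned first fragment
 * forces an skb_copy().  The byte counts are fixed up in the loop
 * (f_frag->len and data_len grow by each pos->len), while the
 * fragment skbs themselves live on as frag_list members of the
 * reassembled skb.
 */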
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn))
				next_tsn++;
			else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}
done:
	return retval;

found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
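/* For example (illustrative TSNs): a queue of FIRST(10), MIDDLE(11),
 * LAST(12) reaches 'found' with first_frag at 10 and pos at 12, and
 * the reassembled event carries MSG_EOR.  A gap such as FIRST(10),
 * LAST(12) instead resets first_frag at the LAST fragment and the
 * walk ends with retval still NULL.
 */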
/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
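/* In partial delivery mode we only chase further fragments when the
 * new event's TSN is at or below the cumulative TSN ack point, i.e.
 * every earlier TSN has already arrived; the next stretch of the
 * partial message can therefore be handed up without risking a hole.
 */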
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}
/* Helper function to store chunks needing ordering.  */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}
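/* For example (illustrative stream/SSN pairs): with the lobby holding
 * (sid 1, ssn 5) and (sid 2, ssn 0), a new event (sid 1, ssn 3) fails
 * both tail shortcuts, and the walk stops at (1, 5) since the stream
 * matches and SSN 3 < 5; the event is inserted there, keeping the
 * lobby sorted by stream ID first and SSN second.
 */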
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event = NULL;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (cssn != sctp_ssn_peek(in, csid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, csid);

		__skb_unlink(pos, &ulpq->lobby);
		if (!event) {
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		} else {
			/* Attach all gathered skbs to the event.  */
			__skb_queue_tail(&temp, pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);
}
/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq);
}
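/* For example (illustrative values): if stream 3 is waiting on SSN 7
 * and a FORWARD-TSN tells us to skip up to SSN 9, sctp_ssn_skip()
 * advances the expected SSN past 9, and the reap above can then
 * deliver a queued (sid 3, ssn 10) event that was blocked only by
 * the abandoned messages.
 */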
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	struct sk_buff *skb;
	__u16 freed = 0;
	__u32 tsn;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}
/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	struct sk_buff *skb;
	__u16 freed = 0;
	__u32 tsn;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	/* Walk backwards through the list, reneging the newest TSNs. */
	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}
/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;

	asoc = ulpq->asoc;

	/* Are we already in partial delivery mode?  */
	if (!sctp_sk(asoc->base.sk)->pd_mode) {

		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_sk(asoc->base.sk)->pd_mode = 1;
			ulpq->pd_mode = 1;
		}
	}
}
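/* Two flags track this state: sctp_sk(sk)->pd_mode marks the socket
 * as having a partial message in flight, while ulpq->pd_mode marks
 * which association owns it; sctp_ulpq_tail_event() uses the pair to
 * choose between the receive queue and the pd_lobby.
 */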
/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}
}
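/* Sizing example (derived from the code above): for an incoming DATA
 * chunk, 'needed' is chunk_hdr->length minus the DATA chunk header,
 * i.e. the payload bytes we must make room for.  Ordering-queue
 * entries are reneged first, and the reassembly queue is only raided
 * for the remainder.
 */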
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}