net/sctp/ulpqueue.c
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
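
/* A ulpq feeds the socket through two intermediate queues:
 *
 *   ulpq->reasm -- DATA fragments awaiting reassembly, kept sorted by TSN;
 *   ulpq->lobby -- reassembled messages awaiting ordered delivery, kept
 *                  sorted by stream id and then by SSN.
 *
 * An event (struct sctp_ulpevent) lives in the cb[] area of its sk_buff,
 * so sctp_skb2event()/sctp_event2skb() convert between the two views.
 */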
/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;
	ulpq->malloced = 0;

	return ulpq;
}
/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}
/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;

	/* Create an event from the incoming chunk.  */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}
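
/* A DATA chunk thus flows sctp_ulpevent_make_rcvmsg() -> sctp_ulpq_reasm()
 * -> sctp_ulpq_order() -> sctp_ulpq_tail_event().  The temporary list
 * above matters: sctp_ulpq_order() may append further newly-ordered
 * events to it, and sctp_ulpq_tail_event() then delivers the whole list
 * in one shot.
 */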
/* Clear the partial delivery mode for this socket.  If this was the last
 * association in partial delivery, drain the whole lobby; otherwise pull
 * out only the events belonging to the association that is leaving PD.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}
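
/* Note the two flavors of pd_mode: sctp_sock->pd_mode is an atomic count
 * of associations on the socket currently in partial delivery, while
 * sctp_ulpq->pd_mode is a plain per-association flag.  sctp_clear_pd()
 * above drops the socket-level count; sctp_ulpq_set_pd() and
 * sctp_ulpq_clear_pd() below keep the two in sync.
 */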
/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
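
/* sctp_ulpq_tail_event() below leans on the layout of sk_buff lists:
 * for the first skb on a queue, skb->prev points back at the
 * sk_buff_head itself, and for an skb on no list it is NULL.  Casting
 * skb->prev therefore recovers the temporary list built by the callers
 * above (or NULL when the event was never queued), which is why the
 * event must be the first member of whatever list it is on.
 */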
/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */
	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/* If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
/* 2nd Level Abstractions */
/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end.  */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end.  */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos.  */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue.  The skb's may be non-linear if the
 * sctp payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of the skb's to the first skb's frag_list.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present.  */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}
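
/* The reassembled message is a single logical skb: f_frag keeps its own
 * data, and every later fragment hangs off skb_shinfo(f_frag)->frag_list,
 * chained through skb->next (the pos->next = pnext assignment above
 * restores the chain that __skb_unlink() severs).  f_frag->len grows to
 * cover the whole message while the payload stays where it was received,
 * so reassembly copies no data.
 */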
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value.  It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(&ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
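
/* The partial delivery point (the SCTP_PARTIAL_DELIVERY_POINT socket
 * option) is a byte threshold: once the in-order prefix of an incomplete
 * message sitting at the head of the reasm queue reaches pd_point bytes,
 * that prefix is delivered without MSG_EOR and the association enters
 * partial delivery mode until the last fragment arrives.
 */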
/* Retrieve the next set of fragments of a partial message.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}
/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}
/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid, cssn;

	sid = event->stream;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap.  */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}
/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos.  */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
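
/* Reassembly and ordering are deliberately separate passes: TSNs order
 * individual DATA chunks association-wide and drive reassembly, while
 * SSNs order whole messages within a single stream.  A message sent
 * with SCTP_DATA_UNORDERED set skips the SSN check entirely, which is
 * why sctp_ulpq_order() returns it untouched.
 */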
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}
/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore.  */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower.  */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
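
/* When a new chunk would overflow the receive window, the stack may
 * "renege": discard data it has already acknowledged but not yet
 * delivered, un-marking the corresponding TSNs in the peer's tsn_map.
 * The helpers below free from the tail of a queue (the newest, most
 * out-of-order data) until enough bytes have been recovered.
 */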
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(list)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue.  */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue.  */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}
/* Partial deliver the first message as there is pressure on rwnd.  */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}
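
/* sctp_ulpq_renege() below frees bytes first from the ordering queue,
 * then from the reassembly queue.  If that recovers enough room, the
 * new chunk is accepted and queued, and partial delivery of the message
 * at the head of the reassembly queue is attempted to keep data moving
 * toward the user.
 */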
/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk.  */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	sk_mem_reclaim(asoc->base.sk);
}
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now.  */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}