/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * This file contains sctp stream manipulation primitives and helpers.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */
#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
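
/* Note (illustrative, not from the original file): stream->out and
 * stream->in are backed by flex_array, one struct sctp_stream_out /
 * sctp_stream_in element per stream, and the fa_*() helpers below are
 * thin wrappers around the flex_array API
 * (flex_array_alloc/prealloc/get/put/free).  Roughly:
 *
 *	out = fa_alloc(sizeof(struct sctp_stream_out), outcnt, gfp);
 *
 * and the SCTP_SO()/SCTP_SI() accessors used throughout this file then
 * resolve to the flex_array element for a given stream id.
 */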
static struct flex_array *fa_alloc(size_t elem_size, size_t elem_count,
				   gfp_t gfp)
{
	struct flex_array *result;
	int err;

	result = flex_array_alloc(elem_size, elem_count, gfp);
	if (result) {
		err = flex_array_prealloc(result, 0, elem_count, gfp);
		if (err) {
			flex_array_free(result);
			result = NULL;
		}
	}

	return result;
}
static void fa_free(struct flex_array *fa)
{
	if (fa)
		flex_array_free(fa);
}
static void fa_copy(struct flex_array *fa, struct flex_array *from,
		    size_t index, size_t count)
{
	void *elem;

	while (count--) {
		elem = flex_array_get(from, index);
		flex_array_put(fa, index, elem, 0);
		index++;
	}
}
static void fa_zero(struct flex_array *fa, size_t index, size_t count)
{
	void *elem;

	while (count--) {
		elem = flex_array_get(fa, index);
		memset(elem, 0, fa->element_size);
		index++;
	}
}
static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
{
	size_t index = 0;

	while (count--) {
		if (elem == flex_array_get(fa, index))
			break;
		index++;
	}

	return index;
}
/* Migrates chunks from stream queues to new stream queues if needed,
 * but not across associations.  Also removes chunks queued to streams
 * higher than the new max.
 */
static void sctp_stream_outq_migrate(struct sctp_stream *stream,
				     struct sctp_stream *new, __u16 outcnt)
{
	struct sctp_association *asoc;
	struct sctp_chunk *ch, *temp;
	struct sctp_outq *outq;
	int i;

	asoc = container_of(stream, struct sctp_association, stream);
	outq = &asoc->outqueue;

	list_for_each_entry_safe(ch, temp, &outq->out_chunk_list, list) {
		__u16 sid = sctp_chunk_stream_no(ch);

		if (sid < outcnt)
			continue;

		sctp_sched_dequeue_common(outq, ch);
		/* No need to call dequeue_done here because
		 * the chunks are not scheduled at this point.
		 */

		/* Mark as failed send. */
		sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
		if (asoc->peer.prsctp_capable &&
		    SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
			asoc->sent_cnt_removable--;
		sctp_chunk_free(ch);
	}

	if (new) {
		/* Here we actually move the old ext stuff into the new
		 * buffer, because we want to keep it. Then
		 * sctp_stream_update will swap ->out pointers.
		 */
		for (i = 0; i < outcnt; i++) {
			kfree(SCTP_SO(new, i)->ext);
			SCTP_SO(new, i)->ext = SCTP_SO(stream, i)->ext;
			SCTP_SO(stream, i)->ext = NULL;
		}
	}

	for (i = outcnt; i < stream->outcnt; i++) {
		kfree(SCTP_SO(stream, i)->ext);
		SCTP_SO(stream, i)->ext = NULL;
	}
}
static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
				 gfp_t gfp)
{
	struct flex_array *out;
	size_t elem_size = sizeof(struct sctp_stream_out);

	out = fa_alloc(elem_size, outcnt, gfp);
	if (!out)
		return -ENOMEM;

	if (stream->out) {
		fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
		if (stream->out_curr) {
			size_t index = fa_index(stream->out, stream->out_curr,
						stream->outcnt);

			BUG_ON(index == stream->outcnt);
			stream->out_curr = flex_array_get(out, index);
		}
		fa_free(stream->out);
	}

	if (outcnt > stream->outcnt)
		fa_zero(out, stream->outcnt, (outcnt - stream->outcnt));

	stream->out = out;

	return 0;
}
static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
				gfp_t gfp)
{
	struct flex_array *in;
	size_t elem_size = sizeof(struct sctp_stream_in);

	in = fa_alloc(elem_size, incnt, gfp);
	if (!in)
		return -ENOMEM;

	if (stream->in) {
		fa_copy(in, stream->in, 0, min(incnt, stream->incnt));
		fa_free(stream->in);
	}

	if (incnt > stream->incnt)
		fa_zero(in, stream->incnt, (incnt - stream->incnt));

	stream->in = in;

	return 0;
}
int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
		     gfp_t gfp)
{
	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
	int i, ret = 0;

	gfp |= __GFP_NOWARN;

	/* Initial stream->out size may be very big, so free it and alloc
	 * a new one with new outcnt to save memory if needed.
	 */
	if (outcnt == stream->outcnt)
		goto in;

	/* Filter out chunks queued on streams that won't exist anymore */
	sched->unsched_all(stream);
	sctp_stream_outq_migrate(stream, NULL, outcnt);
	sched->sched_all(stream);

	ret = sctp_stream_alloc_out(stream, outcnt, gfp);
	if (ret)
		goto out;

	stream->outcnt = outcnt;
	for (i = 0; i < stream->outcnt; i++)
		SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;

	sctp_stream_interleave_init(stream);
	if (!incnt)
		goto out;

in:
	ret = sctp_stream_alloc_in(stream, incnt, gfp);
	if (ret) {
		sched->free(stream);
		fa_free(stream->out);
		stream->out = NULL;
		stream->outcnt = 0;
		goto out;
	}

	stream->incnt = incnt;

out:
	return ret;
}
int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
{
	struct sctp_stream_out_ext *soute;
	int ret;

	soute = kzalloc(sizeof(*soute), GFP_KERNEL);
	if (!soute)
		return -ENOMEM;
	SCTP_SO(stream, sid)->ext = soute;

	ret = sctp_sched_init_sid(stream, sid, GFP_KERNEL);
	if (ret) {
		kfree(SCTP_SO(stream, sid)->ext);
		SCTP_SO(stream, sid)->ext = NULL;
	}

	return ret;
}
void sctp_stream_free(struct sctp_stream *stream)
{
	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
	int i;

	sched->free(stream);
	for (i = 0; i < stream->outcnt; i++)
		kfree(SCTP_SO(stream, i)->ext);
	fa_free(stream->out);
	fa_free(stream->in);
}
void sctp_stream_clear(struct sctp_stream *stream)
{
	int i;

	for (i = 0; i < stream->outcnt; i++) {
		SCTP_SO(stream, i)->mid = 0;
		SCTP_SO(stream, i)->mid_uo = 0;
	}

	for (i = 0; i < stream->incnt; i++)
		SCTP_SI(stream, i)->mid = 0;
}
void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
{
	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);

	sched->unsched_all(stream);
	sctp_stream_outq_migrate(stream, new, new->outcnt);
	sctp_stream_free(stream);

	stream->out = new->out;
	stream->in = new->in;
	stream->outcnt = new->outcnt;
	stream->incnt = new->incnt;

	sched->sched_all(stream);

	new->out = NULL;
	new->in = NULL;
	new->outcnt = 0;
	new->incnt = 0;
}
static int sctp_send_reconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	retval = sctp_primitive_RECONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);

	return retval;
}
static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
				      __u16 str_nums, __be16 *str_list)
{
	struct sctp_association *asoc;
	__u16 i;

	asoc = container_of(stream, struct sctp_association, stream);
	if (!asoc->outqueue.out_qlen)
		return true;

	if (!str_nums)
		return false;

	for (i = 0; i < str_nums; i++) {
		__u16 sid = ntohs(str_list[i]);

		if (SCTP_SO(stream, sid)->ext &&
		    !list_empty(&SCTP_SO(stream, sid)->ext->outq))
			return false;
	}

	return true;
}
int sctp_send_reset_streams(struct sctp_association *asoc,
			    struct sctp_reset_streams *params)
{
	struct sctp_stream *stream = &asoc->stream;
	__u16 i, str_nums, *str_list;
	struct sctp_chunk *chunk;
	int retval = -EINVAL;
	__be16 *nstr_list;
	bool out, in;

	if (!asoc->peer.reconf_capable ||
	    !(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) {
		retval = -ENOPROTOOPT;
		goto out;
	}

	if (asoc->strreset_outstanding) {
		retval = -EINPROGRESS;
		goto out;
	}

	out = params->srs_flags & SCTP_STREAM_RESET_OUTGOING;
	in = params->srs_flags & SCTP_STREAM_RESET_INCOMING;
	if (!out && !in)
		goto out;

	str_nums = params->srs_number_streams;
	str_list = params->srs_stream_list;
	if (str_nums) {
		int param_len = 0;

		if (out) {
			for (i = 0; i < str_nums; i++)
				if (str_list[i] >= stream->outcnt)
					goto out;

			param_len = str_nums * sizeof(__u16) +
				    sizeof(struct sctp_strreset_outreq);
		}

		if (in) {
			for (i = 0; i < str_nums; i++)
				if (str_list[i] >= stream->incnt)
					goto out;

			param_len += str_nums * sizeof(__u16) +
				     sizeof(struct sctp_strreset_inreq);
		}

		if (param_len > SCTP_MAX_CHUNK_LEN -
				sizeof(struct sctp_reconf_chunk))
			goto out;
	}

	nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
	if (!nstr_list) {
		retval = -ENOMEM;
		goto out;
	}

	for (i = 0; i < str_nums; i++)
		nstr_list[i] = htons(str_list[i]);

	if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
		kfree(nstr_list);
		retval = -EAGAIN;
		goto out;
	}

	chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);

	kfree(nstr_list);

	if (!chunk) {
		retval = -ENOMEM;
		goto out;
	}

	if (out) {
		if (str_nums)
			for (i = 0; i < str_nums; i++)
				SCTP_SO(stream, str_list[i])->state =
						       SCTP_STREAM_CLOSED;
		else
			for (i = 0; i < stream->outcnt; i++)
				SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
	}

	asoc->strreset_chunk = chunk;
	sctp_chunk_hold(asoc->strreset_chunk);

	retval = sctp_send_reconf(asoc, chunk);
	if (retval) {
		sctp_chunk_put(asoc->strreset_chunk);
		asoc->strreset_chunk = NULL;
		if (!out)
			goto out;

		if (str_nums)
			for (i = 0; i < str_nums; i++)
				SCTP_SO(stream, str_list[i])->state =
						       SCTP_STREAM_OPEN;
		else
			for (i = 0; i < stream->outcnt; i++)
				SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;

		goto out;
	}

	asoc->strreset_outstanding = out + in;

out:
	return retval;
}
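
/* Illustrative only (not part of the original file): sctp_send_reset_streams()
 * is reached from user space via the SCTP_RESET_STREAMS socket option,
 * roughly like the sketch below (fd and assoc_id are placeholders):
 *
 *	__u16 nr = 2;
 *	struct sctp_reset_streams *srs;
 *	socklen_t len = sizeof(*srs) + nr * sizeof(__u16);
 *
 *	srs = malloc(len);
 *	memset(srs, 0, len);
 *	srs->srs_assoc_id = assoc_id;
 *	srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
 *	srs->srs_number_streams = nr;
 *	srs->srs_stream_list[0] = 0;
 *	srs->srs_stream_list[1] = 1;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_RESET_STREAMS, srs, len);
 *
 * Stream reset must also have been enabled on the socket (see the
 * SCTP_ENABLE_STREAM_RESET option), otherwise the function above fails
 * with -ENOPROTOOPT.
 */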
int sctp_send_reset_assoc(struct sctp_association *asoc)
{
	struct sctp_stream *stream = &asoc->stream;
	struct sctp_chunk *chunk = NULL;
	int retval;
	__u16 i;

	if (!asoc->peer.reconf_capable ||
	    !(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
		return -ENOPROTOOPT;

	if (asoc->strreset_outstanding)
		return -EINPROGRESS;

	if (!sctp_outq_is_empty(&asoc->outqueue))
		return -EAGAIN;

	chunk = sctp_make_strreset_tsnreq(asoc);
	if (!chunk)
		return -ENOMEM;

	/* Block further xmit of data until this request is completed */
	for (i = 0; i < stream->outcnt; i++)
		SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;

	asoc->strreset_chunk = chunk;
	sctp_chunk_hold(asoc->strreset_chunk);

	retval = sctp_send_reconf(asoc, chunk);
	if (retval) {
		sctp_chunk_put(asoc->strreset_chunk);
		asoc->strreset_chunk = NULL;

		for (i = 0; i < stream->outcnt; i++)
			SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;

		return retval;
	}

	asoc->strreset_outstanding = 1;

	return 0;
}
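
/* Illustrative only: the SSN/TSN (association) reset above is typically
 * triggered with the SCTP_RESET_ASSOC socket option, whose optval is just
 * the sctp_assoc_t of the association to reset, e.g.:
 *
 *	sctp_assoc_t id = assoc_id;
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_RESET_ASSOC, &id, sizeof(id));
 */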
int sctp_send_add_streams(struct sctp_association *asoc,
			  struct sctp_add_streams *params)
{
	struct sctp_stream *stream = &asoc->stream;
	struct sctp_chunk *chunk = NULL;
	int retval;
	__u32 outcnt, incnt;
	__u16 out, in;

	if (!asoc->peer.reconf_capable ||
	    !(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
		retval = -ENOPROTOOPT;
		goto out;
	}

	if (asoc->strreset_outstanding) {
		retval = -EINPROGRESS;
		goto out;
	}

	out = params->sas_outstrms;
	in = params->sas_instrms;
	outcnt = stream->outcnt + out;
	incnt = stream->incnt + in;
	if (outcnt > SCTP_MAX_STREAM || incnt > SCTP_MAX_STREAM ||
	    (!out && !in)) {
		retval = -EINVAL;
		goto out;
	}

	if (out) {
		retval = sctp_stream_alloc_out(stream, outcnt, GFP_KERNEL);
		if (retval)
			goto out;
	}

	chunk = sctp_make_strreset_addstrm(asoc, out, in);
	if (!chunk) {
		retval = -ENOMEM;
		goto out;
	}

	asoc->strreset_chunk = chunk;
	sctp_chunk_hold(asoc->strreset_chunk);

	retval = sctp_send_reconf(asoc, chunk);
	if (retval) {
		sctp_chunk_put(asoc->strreset_chunk);
		asoc->strreset_chunk = NULL;
		goto out;
	}

	stream->outcnt = outcnt;

	asoc->strreset_outstanding = !!out + !!in;

out:
	return retval;
}
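
/* Illustrative only: sctp_send_add_streams() is driven by the
 * SCTP_ADD_STREAMS socket option, roughly like:
 *
 *	struct sctp_add_streams sas;
 *
 *	memset(&sas, 0, sizeof(sas));
 *	sas.sas_assoc_id = assoc_id;
 *	sas.sas_outstrms = 2;	(request two extra outgoing streams)
 *	sas.sas_instrms = 0;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_ADD_STREAMS, &sas, sizeof(sas));
 */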
static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
			struct sctp_association *asoc, __be32 resp_seq,
			__be16 type)
{
	struct sctp_chunk *chunk = asoc->strreset_chunk;
	struct sctp_reconf_chunk *hdr;
	union sctp_params param;

	if (!chunk)
		return NULL;

	hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
	sctp_walk_params(param, hdr, params) {
		/* sctp_strreset_tsnreq is actually the basic structure
		 * of all stream reconf params, so it's safe to use it
		 * to access request_seq.
		 */
		struct sctp_strreset_tsnreq *req = param.v;

		if ((!resp_seq || req->request_seq == resp_seq) &&
		    (!type || type == req->param_hdr.type))
			return param.v;
	}

	return NULL;
}

static void sctp_update_strreset_result(struct sctp_association *asoc,
					__u32 result)
{
	asoc->strreset_result[1] = asoc->strreset_result[0];
	asoc->strreset_result[0] = result;
}
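
/* Illustrative note (not from the original file): strreset_result[] acts as
 * a two-entry history of the last results we sent, so a retransmitted
 * request can be answered with the same result.  E.g. once requests N-2 and
 * N-1 have been answered, strreset_result[0] holds the result for N-1 and
 * strreset_result[1] the result for N-2; the
 * "i = asoc->strreset_inseq - request_seq - 1" lookups in the
 * sctp_process_strreset_*() handlers below index into exactly this cache.
 */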
struct sctp_chunk *sctp_process_strreset_outreq(
				struct sctp_association *asoc,
				union sctp_params param,
				struct sctp_ulpevent **evp)
{
	struct sctp_strreset_outreq *outreq = param.v;
	struct sctp_stream *stream = &asoc->stream;
	__u32 result = SCTP_STRRESET_DENIED;
	__be16 *str_p = NULL;
	__u32 request_seq;
	__u16 i, nums;

	request_seq = ntohl(outreq->request_seq);

	if (ntohl(outreq->send_reset_at_tsn) >
	    sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)) {
		result = SCTP_STRRESET_IN_PROGRESS;
		goto err;
	}

	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
		result = SCTP_STRRESET_ERR_BAD_SEQNO;
		goto err;
	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
		i = asoc->strreset_inseq - request_seq - 1;
		result = asoc->strreset_result[i];
		goto err;
	}
	asoc->strreset_inseq++;

	/* Check strreset_enable after the inseq increment: from a response
	 * with result denied the sender cannot tell that the peer has stream
	 * reset disabled, and doing it here also keeps the behaviour
	 * consistent with BSD.
	 */
	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
		goto out;

	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
	str_p = outreq->list_of_streams;
	for (i = 0; i < nums; i++) {
		if (ntohs(str_p[i]) >= stream->incnt) {
			result = SCTP_STRRESET_ERR_WRONG_SSN;
			goto out;
		}
	}

	if (asoc->strreset_chunk) {
		if (!sctp_chunk_lookup_strreset_param(
				asoc, outreq->response_seq,
				SCTP_PARAM_RESET_IN_REQUEST)) {
			/* same processing as when outstanding isn't 0 */
			result = SCTP_STRRESET_ERR_IN_PROGRESS;
			goto out;
		}

		asoc->strreset_outstanding--;
		asoc->strreset_outseq++;

		if (!asoc->strreset_outstanding) {
			struct sctp_transport *t;

			t = asoc->strreset_chunk->transport;
			if (del_timer(&t->reconf_timer))
				sctp_transport_put(t);

			sctp_chunk_put(asoc->strreset_chunk);
			asoc->strreset_chunk = NULL;
		}
	}

	if (nums)
		for (i = 0; i < nums; i++)
			SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
	else
		for (i = 0; i < stream->incnt; i++)
			SCTP_SI(stream, i)->mid = 0;

	result = SCTP_STRRESET_PERFORMED;

	*evp = sctp_ulpevent_make_stream_reset_event(asoc,
		SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);

out:
	sctp_update_strreset_result(asoc, result);
err:
	return sctp_make_strreset_resp(asoc, result, request_seq);
}
struct sctp_chunk *sctp_process_strreset_inreq(
				struct sctp_association *asoc,
				union sctp_params param,
				struct sctp_ulpevent **evp)
{
	struct sctp_strreset_inreq *inreq = param.v;
	struct sctp_stream *stream = &asoc->stream;
	__u32 result = SCTP_STRRESET_DENIED;
	struct sctp_chunk *chunk = NULL;
	__u32 request_seq;
	__u16 i, nums;
	__be16 *str_p;

	request_seq = ntohl(inreq->request_seq);
	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
		result = SCTP_STRRESET_ERR_BAD_SEQNO;
		goto err;
	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
		i = asoc->strreset_inseq - request_seq - 1;
		result = asoc->strreset_result[i];
		if (result == SCTP_STRRESET_PERFORMED)
			return NULL;
		goto err;
	}
	asoc->strreset_inseq++;

	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
		goto out;

	if (asoc->strreset_outstanding) {
		result = SCTP_STRRESET_ERR_IN_PROGRESS;
		goto out;
	}

	nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
	str_p = inreq->list_of_streams;
	for (i = 0; i < nums; i++) {
		if (ntohs(str_p[i]) >= stream->outcnt) {
			result = SCTP_STRRESET_ERR_WRONG_SSN;
			goto out;
		}
	}

	if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
		result = SCTP_STRRESET_IN_PROGRESS;
		asoc->strreset_inseq--;
		goto err;
	}

	chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
	if (!chunk)
		goto out;

	if (nums)
		for (i = 0; i < nums; i++)
			SCTP_SO(stream, ntohs(str_p[i]))->state =
					       SCTP_STREAM_CLOSED;
	else
		for (i = 0; i < stream->outcnt; i++)
			SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;

	asoc->strreset_chunk = chunk;
	asoc->strreset_outstanding = 1;
	sctp_chunk_hold(asoc->strreset_chunk);

	result = SCTP_STRRESET_PERFORMED;

out:
	sctp_update_strreset_result(asoc, result);
err:
	if (!chunk)
		chunk = sctp_make_strreset_resp(asoc, result, request_seq);

	return chunk;
}
struct sctp_chunk *sctp_process_strreset_tsnreq(
				struct sctp_association *asoc,
				union sctp_params param,
				struct sctp_ulpevent **evp)
{
	__u32 init_tsn = 0, next_tsn = 0, max_tsn_seen;
	struct sctp_strreset_tsnreq *tsnreq = param.v;
	struct sctp_stream *stream = &asoc->stream;
	__u32 result = SCTP_STRRESET_DENIED;
	__u32 request_seq;
	__u16 i;

	request_seq = ntohl(tsnreq->request_seq);
	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
		result = SCTP_STRRESET_ERR_BAD_SEQNO;
		goto err;
	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
		i = asoc->strreset_inseq - request_seq - 1;
		result = asoc->strreset_result[i];
		if (result == SCTP_STRRESET_PERFORMED) {
			next_tsn = asoc->ctsn_ack_point + 1;
			init_tsn =
				sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
		}
		goto err;
	}

	if (!sctp_outq_is_empty(&asoc->outqueue)) {
		result = SCTP_STRRESET_IN_PROGRESS;
		goto err;
	}

	asoc->strreset_inseq++;

	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
		goto out;

	if (asoc->strreset_outstanding) {
		result = SCTP_STRRESET_ERR_IN_PROGRESS;
		goto out;
	}

	/* G4: The same processing as though a FWD-TSN chunk (as defined in
	 *     [RFC3758]) with all streams affected and a new cumulative TSN
	 *     ACK of the Receiver's Next TSN minus 1 were received MUST be
	 *     performed.
	 */
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
	asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen);

	/* G1: Compute an appropriate value for the Receiver's Next TSN -- the
	 *     TSN that the peer should use to send the next DATA chunk.  The
	 *     value SHOULD be the smallest TSN not acknowledged by the
	 *     receiver of the request plus 2^31.
	 */
	init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 init_tsn, GFP_ATOMIC);

	/* G3: The same processing as though a SACK chunk with no gap report
	 *     and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
	 *     received MUST be performed.
	 */
	sctp_outq_free(&asoc->outqueue);

	/* G2: Compute an appropriate value for the local endpoint's next TSN,
	 *     i.e., the next TSN assigned by the receiver of the SSN/TSN reset
	 *     chunk.  The value SHOULD be the highest TSN sent by the receiver
	 *     of the request plus 1.
	 */
	next_tsn = asoc->next_tsn;
	asoc->ctsn_ack_point = next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;

	/* G5: The next expected and outgoing SSNs MUST be reset to 0 for all
	 *     incoming and outgoing streams.
	 */
	for (i = 0; i < stream->outcnt; i++) {
		SCTP_SO(stream, i)->mid = 0;
		SCTP_SO(stream, i)->mid_uo = 0;
	}
	for (i = 0; i < stream->incnt; i++)
		SCTP_SI(stream, i)->mid = 0;

	result = SCTP_STRRESET_PERFORMED;

	*evp = sctp_ulpevent_make_assoc_reset_event(asoc, 0, init_tsn,
						    next_tsn, GFP_ATOMIC);

out:
	sctp_update_strreset_result(asoc, result);
err:
	return sctp_make_strreset_tsnresp(asoc, result, request_seq,
					  next_tsn, init_tsn);
}
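
/* Worked example (illustrative only) for the G1/G2 computations above: if
 * the peer's cumulative TSN ack is 0x00001000, the new Receiver's Next TSN
 * becomes 0x00001000 + 2^31 = 0x80001000, i.e. the incoming TSN space jumps
 * by half its range so that old and new TSNs cannot be confused; the local
 * Sender's Next TSN simply continues from asoc->next_tsn, with the
 * cumulative ack point rewound to next_tsn - 1.
 */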
struct sctp_chunk *sctp_process_strreset_addstrm_out(
				struct sctp_association *asoc,
				union sctp_params param,
				struct sctp_ulpevent **evp)
{
	struct sctp_strreset_addstrm *addstrm = param.v;
	struct sctp_stream *stream = &asoc->stream;
	__u32 result = SCTP_STRRESET_DENIED;
	__u32 request_seq, incnt;
	__u16 in, i;

	request_seq = ntohl(addstrm->request_seq);
	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
		result = SCTP_STRRESET_ERR_BAD_SEQNO;
		goto err;
	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
		i = asoc->strreset_inseq - request_seq - 1;
		result = asoc->strreset_result[i];
		goto err;
	}
	asoc->strreset_inseq++;

	if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
		goto out;

	in = ntohs(addstrm->number_of_streams);
	incnt = stream->incnt + in;
	if (!in || incnt > SCTP_MAX_STREAM)
		goto out;

	if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
		goto out;

	if (asoc->strreset_chunk) {
		if (!sctp_chunk_lookup_strreset_param(
			asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
			/* same processing as when outstanding isn't 0 */
			result = SCTP_STRRESET_ERR_IN_PROGRESS;
			goto out;
		}

		asoc->strreset_outstanding--;
		asoc->strreset_outseq++;

		if (!asoc->strreset_outstanding) {
			struct sctp_transport *t;

			t = asoc->strreset_chunk->transport;
			if (del_timer(&t->reconf_timer))
				sctp_transport_put(t);

			sctp_chunk_put(asoc->strreset_chunk);
			asoc->strreset_chunk = NULL;
		}
	}

	stream->incnt = incnt;

	result = SCTP_STRRESET_PERFORMED;

	*evp = sctp_ulpevent_make_stream_change_event(asoc,
		0, ntohs(addstrm->number_of_streams), 0, GFP_ATOMIC);

out:
	sctp_update_strreset_result(asoc, result);
err:
	return sctp_make_strreset_resp(asoc, result, request_seq);
}
struct sctp_chunk *sctp_process_strreset_addstrm_in(
				struct sctp_association *asoc,
				union sctp_params param,
				struct sctp_ulpevent **evp)
{
	struct sctp_strreset_addstrm *addstrm = param.v;
	struct sctp_stream *stream = &asoc->stream;
	__u32 result = SCTP_STRRESET_DENIED;
	struct sctp_chunk *chunk = NULL;
	__u32 request_seq, outcnt;
	__u16 out, i;
	int ret;

	request_seq = ntohl(addstrm->request_seq);
	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
		result = SCTP_STRRESET_ERR_BAD_SEQNO;
		goto err;
	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
		i = asoc->strreset_inseq - request_seq - 1;
		result = asoc->strreset_result[i];
		if (result == SCTP_STRRESET_PERFORMED)
			return NULL;
		goto err;
	}
	asoc->strreset_inseq++;

	if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
		goto out;

	if (asoc->strreset_outstanding) {
		result = SCTP_STRRESET_ERR_IN_PROGRESS;
		goto out;
	}

	out = ntohs(addstrm->number_of_streams);
	outcnt = stream->outcnt + out;
	if (!out || outcnt > SCTP_MAX_STREAM)
		goto out;

	ret = sctp_stream_alloc_out(stream, outcnt, GFP_ATOMIC);
	if (ret)
		goto out;

	chunk = sctp_make_strreset_addstrm(asoc, out, 0);
	if (!chunk)
		goto out;

	asoc->strreset_chunk = chunk;
	asoc->strreset_outstanding = 1;
	sctp_chunk_hold(asoc->strreset_chunk);

	stream->outcnt = outcnt;

	result = SCTP_STRRESET_PERFORMED;

out:
	sctp_update_strreset_result(asoc, result);
err:
	if (!chunk)
		chunk = sctp_make_strreset_resp(asoc, result, request_seq);

	return chunk;
}
struct sctp_chunk *sctp_process_strreset_resp(
				struct sctp_association *asoc,
				union sctp_params param,
				struct sctp_ulpevent **evp)
{
	struct sctp_stream *stream = &asoc->stream;
	struct sctp_strreset_resp *resp = param.v;
	struct sctp_transport *t;
	__u16 i, nums, flags = 0;
	struct sctp_paramhdr *req;
	__u32 result;

	req = sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0);
	if (!req)
		return NULL;

	result = ntohl(resp->result);
	if (result != SCTP_STRRESET_PERFORMED) {
		/* if in progress, do nothing but retransmit */
		if (result == SCTP_STRRESET_IN_PROGRESS)
			return NULL;
		else if (result == SCTP_STRRESET_DENIED)
			flags = SCTP_STREAM_RESET_DENIED;
		else
			flags = SCTP_STREAM_RESET_FAILED;
	}

	if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
		struct sctp_strreset_outreq *outreq;
		__be16 *str_p;

		outreq = (struct sctp_strreset_outreq *)req;
		str_p = outreq->list_of_streams;
		nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
		       sizeof(__u16);

		if (result == SCTP_STRRESET_PERFORMED) {
			struct sctp_stream_out *sout;
			if (nums) {
				for (i = 0; i < nums; i++) {
					sout = SCTP_SO(stream, ntohs(str_p[i]));
					sout->mid = 0;
					sout->mid_uo = 0;
				}
			} else {
				for (i = 0; i < stream->outcnt; i++) {
					sout = SCTP_SO(stream, i);
					sout->mid = 0;
					sout->mid_uo = 0;
				}
			}
		}

		flags |= SCTP_STREAM_RESET_OUTGOING_SSN;

		for (i = 0; i < stream->outcnt; i++)
			SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;

		*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
			nums, str_p, GFP_ATOMIC);
	} else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
		struct sctp_strreset_inreq *inreq;
		__be16 *str_p;

		/* if the result is performed, it's impossible for inreq */
		if (result == SCTP_STRRESET_PERFORMED)
			return NULL;

		inreq = (struct sctp_strreset_inreq *)req;
		str_p = inreq->list_of_streams;
		nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
		       sizeof(__u16);

		flags |= SCTP_STREAM_RESET_INCOMING_SSN;

		*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
			nums, str_p, GFP_ATOMIC);
	} else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
		struct sctp_strreset_resptsn *resptsn;
		__u32 stsn, rtsn;

		/* check for resptsn, as sctp_verify_reconf didn't do it */
		if (ntohs(param.p->length) != sizeof(*resptsn))
			return NULL;

		resptsn = (struct sctp_strreset_resptsn *)resp;
		stsn = ntohl(resptsn->senders_next_tsn);
		rtsn = ntohl(resptsn->receivers_next_tsn);

		if (result == SCTP_STRRESET_PERFORMED) {
			__u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
						&asoc->peer.tsn_map);
			LIST_HEAD(temp);

			asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn);

			sctp_tsnmap_init(&asoc->peer.tsn_map,
					 SCTP_TSN_MAP_INITIAL,
					 stsn, GFP_ATOMIC);

			/* Clean up sacked and abandoned queues only. As the
			 * out_chunk_list may not be empty, splice it to temp,
			 * then get it back after sctp_outq_free is done.
			 */
			list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
			sctp_outq_free(&asoc->outqueue);
			list_splice_init(&temp, &asoc->outqueue.out_chunk_list);

			asoc->next_tsn = rtsn;
			asoc->ctsn_ack_point = asoc->next_tsn - 1;
			asoc->adv_peer_ack_point = asoc->ctsn_ack_point;

			for (i = 0; i < stream->outcnt; i++) {
				SCTP_SO(stream, i)->mid = 0;
				SCTP_SO(stream, i)->mid_uo = 0;
			}
			for (i = 0; i < stream->incnt; i++)
				SCTP_SI(stream, i)->mid = 0;
		}

		for (i = 0; i < stream->outcnt; i++)
			SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;

		*evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags,
			stsn, rtsn, GFP_ATOMIC);
	} else if (req->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) {
		struct sctp_strreset_addstrm *addstrm;
		__u16 number;

		addstrm = (struct sctp_strreset_addstrm *)req;
		nums = ntohs(addstrm->number_of_streams);
		number = stream->outcnt - nums;

		if (result == SCTP_STRRESET_PERFORMED)
			for (i = number; i < stream->outcnt; i++)
				SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
		else
			stream->outcnt = number;

		*evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
			0, nums, GFP_ATOMIC);
	} else if (req->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) {
		struct sctp_strreset_addstrm *addstrm;

		/* if the result is performed, it's impossible for addstrm in
		 * request.
		 */
		if (result == SCTP_STRRESET_PERFORMED)
			return NULL;

		addstrm = (struct sctp_strreset_addstrm *)req;
		nums = ntohs(addstrm->number_of_streams);

		*evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
			nums, 0, GFP_ATOMIC);
	}

	asoc->strreset_outstanding--;
	asoc->strreset_outseq++;

	/* remove everything for this reconf request */
	if (!asoc->strreset_outstanding) {
		t = asoc->strreset_chunk->transport;
		if (del_timer(&t->reconf_timer))
			sctp_transport_put(t);

		sctp_chunk_put(asoc->strreset_chunk);
		asoc->strreset_chunk = NULL;
	}

	return NULL;
}