// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Dajiang Zhang         <dajiang.zhang@nokia.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/gfp.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
static int sctp_cmd_interpreter(enum sctp_event_type event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp);
static int sctp_side_effects(enum sctp_event_type event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp);
/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */
	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}
/* Helper function for delayed processing of an SCTP ECNE chunk. */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						 __u32 lowest_tsn,
						 struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}
/* Helper function to do delayed processing of an ECN CWR chunk. */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet.
	 */
	asoc->need_ecne = 0;
}
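
/* Summary of the three ECN helpers above: CE marks observed on incoming
 * datagrams cause ECNE chunks to be sent toward the peer; on receiving an
 * ECNE the sender lowers cwnd/ssthresh for the affected transport and
 * answers with a CWR carrying the lowest TSN of the marked datagram, which
 * in turn switches ECNE generation back off at the receiver.
 */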
/* Generate SACK if necessary.  We call this at the end of a packet. */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 struct sctp_cmd_seq *commands)
{
	struct sctp_transport *trans = asoc->peer.last_data_from;
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *           : is to be responded to with a SACK. ...
	 *           : When DATA chunks are out of order, SACK's
	 *           : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet. */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet. */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		__u32 old_a_rwnd = asoc->a_rwnd;

		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack) {
			asoc->a_rwnd = old_a_rwnd;
			goto nomem;
		}

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}
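
/* Note on the SACK policy above: in-order data with no force flag is only
 * counted, and the SACK timer is (re)armed with the per-transport or
 * per-association delay; out-of-order data or a sackfreq of 1 makes
 * sack_needed true immediately, so the acknowledgement goes out with this
 * packet rather than being delayed.
 */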
/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, T3_rtx_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error;

	/* Check whether a task is in the sock. */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}
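
/* The timer handlers above and below share a pattern: if the socket is
 * currently owned by user context the event cannot be processed, so the
 * timer is re-armed roughly 50 ms out (HZ/20).  A reference is taken only
 * when mod_timer() reports the timer was not already pending, because the
 * reference held for the current expiry is dropped by the *_put() on exit.
 */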
/* This is an interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					enum sctp_event_timeout timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later. */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}
static void sctp_generate_t1_cookie_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t,
			   timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]);

	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}
/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(struct timer_list *t)
{
	struct sctp_transport *transport = from_timer(transport, t, hb_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	u32 elapsed, timeout;
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Check if we should still send the heartbeat or reschedule */
	elapsed = jiffies - transport->last_time_sent;
	timeout = sctp_transport_timeout(transport);
	if (elapsed < timeout) {
		elapsed = timeout - elapsed;
		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}
/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, proto_unreach_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->proto_unreach_timer,
			       jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}
/* Handle the timeout of the RE-CONFIG timer. */
void sctp_generate_reconf_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, reconf_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* This happens when the response arrives after the timer is triggered. */
	if (!asoc->strreset_chunk)
		goto out_unlock;

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}
/* Handle the timeout of the probe timer. */
void sctp_generate_probe_event(struct timer_list *t)
{
	struct sctp_transport *transport = from_timer(transport, t, probe_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->probe_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_PROBE),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}
/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	[SCTP_EVENT_TIMEOUT_NONE] =		NULL,
	[SCTP_EVENT_TIMEOUT_T1_COOKIE] =	sctp_generate_t1_cookie_event,
	[SCTP_EVENT_TIMEOUT_T1_INIT] =		sctp_generate_t1_init_event,
	[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =	sctp_generate_t2_shutdown_event,
	[SCTP_EVENT_TIMEOUT_T3_RTX] =		NULL,
	[SCTP_EVENT_TIMEOUT_T4_RTO] =		sctp_generate_t4_rto_event,
	[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
					sctp_generate_t5_shutdown_guard_event,
	[SCTP_EVENT_TIMEOUT_HEARTBEAT] =	NULL,
	[SCTP_EVENT_TIMEOUT_RECONF] =		NULL,
	[SCTP_EVENT_TIMEOUT_SACK] =		sctp_generate_sack_event,
	[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =	sctp_generate_autoclose_event,
};
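
/* Apart from SCTP_EVENT_TIMEOUT_NONE, the NULL entries in this table
 * (T3-RTX, HEARTBEAT, RECONF) correspond to timers that are armed per
 * transport rather than per association; their sctp_generate_*_event()
 * handlers are attached directly to each sctp_transport's timer instead of
 * being dispatched through this table.
 */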
/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within an RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 */
static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (asoc->base.net->sctp.pf_enable &&
	    transport->state == SCTP_ACTIVE &&
	    transport->error_count < transport->pathmaxrxt &&
	    transport->error_count > transport->pf_retrans) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF, 0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_transport_reset_hb_timer(transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	if (transport->error_count > transport->ps_retrans &&
	    asoc->peer.primary_path == transport &&
	    asoc->peer.active_path != transport)
		sctp_assoc_set_primary(asoc, asoc->peer.active_path);

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case:  the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}
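
/* Illustrative ordering of the thresholds above: with pf_retrans = 0 and
 * pathmaxrxt = 5 (the usual defaults), the first unacknowledged
 * retransmission moves an ACTIVE transport to PF and the sixth consecutive
 * error marks it DOWN; each T3-RTX strike also doubles the transport RTO,
 * capped at rto_max.
 */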
/* Worker routine to handle INIT command failure. */
static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}
/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  enum sctp_event_type event_type,
				  union sctp_subtype subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
							(__u16)error, 0, 0, chunk,
							GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
							(__u16)error, 0, 0, NULL,
							GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}
/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 struct sctp_init_chunk *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}
/* Helper function to break out starting up of heartbeat timers. */
static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		sctp_transport_reset_hb_timer(t);
}
static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}
/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}
/* Helper function to handle the reception of an HEARTBEAT ACK. */
static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	struct sctp_sender_hb_info *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING. If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being
	 * reached to shutdown the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	sctp_transport_dst_confirm(t);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer. */
	sctp_transport_reset_hb_timer(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}
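
/* If the association's only path was previously unconfirmed, data may have
 * been sitting in the queue waiting for this HEARTBEAT ACK, so an immediate
 * retransmission is scheduled once the path is confirmed above.
 */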
/* Helper function to process the PROCESS_SACK command. */
static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		/* There are no more TSNs awaiting SACK. */
		err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}
/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}
/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
			       struct sctp_association *asoc,
			       enum sctp_state state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED)) {
			inet_sk_set_state(sk, SCTP_SS_CLOSING);
			sk->sk_shutdown |= RCV_SHUTDOWN;
		}
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED)) {
		kfree(asoc->peer.cookie);
		asoc->peer.cookie = NULL;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}

	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
	    !sctp_outq_is_empty(&asoc->outqueue))
		sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
}
/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_association_free(asoc);
}
/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use active path instead of primary path just
 * because primary path may be inactive).
 */
static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}
/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			struct sctp_chunkhdr *unk_chunk_hdr;

			unk_chunk_hdr = (struct sctp_chunkhdr *)(err_hdr + 1);
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}
/* Helper function to remove the association non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *temp;
	struct list_head *pos;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);
	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}

static void sctp_cmd_peer_no_auth(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, GFP_ATOMIC);
	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}
static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     enum sctp_event_timeout timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}
}
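
/* For example, starting from a 3 s T1 timeout with a 60 s max_init_timeo,
 * repeated expirations walk the timeout through 3, 6, 12, 24, 48, 60 s:
 * the value doubles only once every transport has been tried at the current
 * timeout (one full "cycle"), and is clamped at max_init_timeo.
 */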
/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static void sctp_cmd_send_msg(struct sctp_association *asoc,
			      struct sctp_datamsg *msg, gfp_t gfp)
{
	struct sctp_chunk *chunk;

	list_for_each_entry(chunk, &msg->chunks, frag_list)
		sctp_outq_tail(&asoc->outqueue, chunk, gfp);

	asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
}
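
/* The message is also registered with the configured stream scheduler so
 * that per-stream scheduling (e.g. SCTP_SS_PRIO or SCTP_SS_RR) can pick
 * chunks fairly across streams when the output queue is flushed.
 */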
/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])
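
/* Note: debug_post_sfx() looks the association up again via sctp_id2assoc()
 * before dereferencing it, because the side-effect processing it follows may
 * have run SCTP_CMD_DELETE_TCB and freed the association.
 */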
/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
	       union sctp_subtype subtype, enum sctp_state state,
	       struct sctp_endpoint *ep, struct sctp_association *asoc,
	       void *event_arg, gfp_t gfp)
{
	typedef const char *(printfn_t)(union sctp_subtype);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
	const struct sctp_sm_table_entry *state_fn;
	struct sctp_cmd_seq commands;
	enum sctp_disposition status;
	int error = 0;

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, &asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}
/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(enum sctp_event_type event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, *asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
	case SCTP_DISPOSITION_ABORT:
		/* This should now be a command. */
		*asoc = NULL;
		break;

	case SCTP_DISPOSITION_CONSUME:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	}

bail:
	return error;
}
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter. */
static int sctp_cmd_interpreter(enum sctp_event_type event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp)
{
	struct sctp_sock *sp = sctp_sk(ep->base.sk);
	struct sctp_chunk *chunk = NULL, *new_obj;
	struct sctp_packet *packet;
	struct sctp_sackhdr sackh;
	struct timer_list *timer;
	struct sctp_transport *t;
	unsigned long timeout;
	struct sctp_cmd *cmd;
	int local_cork = 0;
	int error = 0;
	int force;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NEW_ASOC:
			/* Register a new association. */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}

			/* Register with the endpoint. */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			/* Delete the current association. */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state. */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN. */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			asoc->stream.si->handle_ftsn(&asoc->ulpq,
						     cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK. */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk. */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk. */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enabling COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios, the following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk. */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer. */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			asoc->stream.si->ulpevent_data(&asoc->ulpq,
						       cmd->obj.chunk,
						       GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer. */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			asoc->stream.si->enqueue_event(&asoc->ulpq,
						       cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If a caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer. */
			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer. */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet, gfp);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_T1_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T1_RTX);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing. */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing. */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_TIMER_START_ONCE:
			timer = &asoc->timers[cmd->obj.to];

			if (timer_pending(timer))
				break;
			fallthrough;

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];

			/*
			 * SCTP has a hard time with timer starts.  Because we process
			 * timer starts as side effects, it can be hard to tell if we
			 * have already started a timer or not, which leads to BUG
			 * halts when we call add_timer.  So here, instead of just
			 * starting a timer, if the timer is already started, just mod
			 * the timer with the shorter of the two expiration times.
			 */
			if (!timer_pending(timer))
				sctp_association_hold(asoc);
			timer_reduce(timer, jiffies + timeout);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
			chunk = cmd->obj.chunk;
			t = sctp_assoc_choose_alter_transport(asoc,
						asoc->init_last_sent_to);
			asoc->init_last_sent_to = t;
			chunk->transport = t;
			t->init_sent_count++;
			/* Set the new transport as primary */
			sctp_assoc_set_primary(asoc, t);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_INIT,
						 "INIT");

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			break;

		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_COOKIE,
						 "COOKIE");

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				sctp_retransmit_mark(&asoc->outqueue, t,
						     SCTP_RTXR_T1_RTX);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.u16);
			break;

		case SCTP_CMD_INIT_COUNTER_INC:
			asoc->init_err_counter++;
			break;

		case SCTP_CMD_INIT_COUNTER_RESET:
			asoc->init_err_counter = 0;
			asoc->init_cycle = 0;
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->init_sent_count = 0;
			}
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			pr_debug("%s: vtag mismatch!\n", __func__);
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport. */
			sctp_do_8_2_transport_strike(commands, asoc,
						     cmd->obj.transport, 0);
			break;

		case SCTP_CMD_TRANSPORT_IDLE:
			t = cmd->obj.transport;
			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
			break;

		case SCTP_CMD_TRANSPORT_HB_SENT:
			t = cmd->obj.transport;
			sctp_do_8_2_transport_strike(commands, asoc,
						     t, 1);
			t->hb_sent = 1;
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_transport_reset_hb_timer(t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_PROBE_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_transport_reset_probe_timer(t);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.be32;
			sackh.a_rwnd = htonl(asoc->peer.rwnd +
					     asoc->outqueue.outstanding_bytes);
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			chunk->subh.sack_hdr = &sackh;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_CHUNK(chunk));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.
			 * Uncork the queue since there might be
			 * responses pending.
			 */
			chunk->pdiscard = 1;
			if (asoc) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			asoc->stream.si->renege_events(&asoc->ulpq,
						       cmd->obj.chunk,
						       GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		case SCTP_CMD_CLEAR_INIT_TAG:
			asoc->peer.i.init_tag = 0;
			break;
		case SCTP_CMD_DEL_NON_PRIMARY:
			sctp_cmd_del_non_primary(asoc);
			break;
		case SCTP_CMD_T3_RTX_TIMERS_STOP:
			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
			break;
		case SCTP_CMD_FORCE_PRIM_RETRAN:
			t = asoc->peer.retran_path;
			asoc->peer.retran_path = asoc->peer.primary_path;
			sctp_outq_uncork(&asoc->outqueue, gfp);
			local_cork = 0;
			asoc->peer.retran_path = t;
			break;
		case SCTP_CMD_SET_SK_ERR:
			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
			break;
		case SCTP_CMD_ASSOC_CHANGE:
			sctp_cmd_assoc_change(commands, asoc,
					      cmd->obj.u8);
			break;
		case SCTP_CMD_ADAPTATION_IND:
			sctp_cmd_adaptation_ind(commands, asoc);
			break;
		case SCTP_CMD_PEER_NO_AUTH:
			sctp_cmd_peer_no_auth(commands, asoc);
			break;
		case SCTP_CMD_ASSOC_SHKEY:
			error = sctp_auth_asoc_init_active_key(asoc,
							       GFP_ATOMIC);
			break;
		case SCTP_CMD_UPDATE_INITTAG:
			asoc->peer.i.init_tag = cmd->obj.u32;
			break;
		case SCTP_CMD_SEND_MSG:
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
			break;
		case SCTP_CMD_PURGE_ASCONF_QUEUE:
			sctp_asconf_queue_teardown(asoc);
			break;

		case SCTP_CMD_SET_ASOC:
			if (asoc && local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			asoc = cmd->obj.asoc;
			break;

		default:
			pr_warn("Impossible command: %u\n",
				cmd->verb);
			break;
		}

		if (error) {
			cmd = sctp_next_cmd(commands);
			while (cmd) {
				if (cmd->verb == SCTP_CMD_REPLY)
					sctp_chunk_free(cmd->obj.chunk);
				cmd = sctp_next_cmd(commands);
			}
			break;
		}
	}

	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
	 */
	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
		if (chunk->end_of_packet || chunk->singleton)
			sctp_outq_uncork(&asoc->outqueue, gfp);
	} else if (local_cork)
		sctp_outq_uncork(&asoc->outqueue, gfp);

	if (sp->data_ready_signalled)
		sp->data_ready_signalled = 0;

	return error;
}
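
/* A note on corking in the interpreter above: the first SCTP_CMD_REPLY or
 * SCTP_CMD_SEND_MSG handled in a command sequence corks the association's
 * output queue (tracked in local_cork), so every reply generated while
 * interpreting a single event is bundled into as few packets as possible;
 * the queue is uncorked once at the end of the run, or earlier when the
 * association is switched, deleted, or the packet is discarded.  If a
 * command fails, any SCTP_CMD_REPLY chunks still queued are freed rather
 * than sent so that they are not leaked.
 */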