net/sctp/sm_sideeffect.c

/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Dajiang Zhang         <dajiang.zhang@nokia.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

static int sctp_cmd_interpreter(sctp_event_t event_type,
				sctp_subtype_t subtype,
				sctp_state_t state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				sctp_disposition_t status,
				sctp_cmd_seq_t *commands,
				gfp_t gfp);
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     sctp_state_t state,
			     struct sctp_endpoint *ep,
			     struct sctp_association *asoc,
			     void *event_arg,
			     sctp_disposition_t status,
			     sctp_cmd_seq_t *commands,
			     gfp_t gfp);

static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     struct sctp_transport *t);

/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk.  */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to do delayed processing of ECN CWR chunk.  */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary.  We call this at the end of a packet.  */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 sctp_cmd_seq_t *commands)
{
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	struct sctp_transport *trans = asoc->peer.last_data_from;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACK's
	 *	     : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack)
			goto nomem;

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	int error;
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	/* Check whether a task is in the sock.  */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this transport really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (transport->dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* This is an interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					sctp_event_timeout_t timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later.  */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}
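
/* The following wrappers are installed as timer callbacks; each one simply
 * forwards to sctp_generate_timeout_event() with the matching timeout type.
 */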
static void sctp_generate_t1_cookie_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *)data;
	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	int error = 0;
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (transport->dead)
		goto out_unlock;

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->proto_unreach_timer,
				jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

/* Inject a SACK Timeout event into the state machine.  */
static void sctp_generate_sack_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}
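
/* Dispatch table of per-association timeout handlers, indexed by
 * sctp_event_timeout_t.  The NULL slots correspond to timeout types that
 * are not driven through this table (the T3-RTX and HEARTBEAT timers are
 * armed per transport with their own callbacks above).
 */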
sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	NULL,
	sctp_generate_t1_cookie_event,
	sctp_generate_t1_init_event,
	sctp_generate_t2_shutdown_event,
	NULL,
	sctp_generate_t4_rto_event,
	sctp_generate_t5_shutdown_guard_event,
	NULL,
	sctp_generate_sack_event,
	sctp_generate_autoclose_event,
};

/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if ((transport->state == SCTP_ACTIVE) &&
	    (asoc->pf_retrans < transport->pathmaxrxt) &&
	    (transport->error_count > asoc->pf_retrans)) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_cmd_hb_timer_update(commands, transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case:  the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}

/* Worker routine to handle INIT command failure.  */
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
				  struct sctp_association *asoc,
				  sctp_event_t event_type,
				  sctp_subtype_t subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;
	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, chunk,
						GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 sctp_init_chunk_t *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {

		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
			sctp_transport_hold(t);
	}
}

static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer.  */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);
}

/* Helper function to handle the reception of a HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	sctp_sender_hb_info_t *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING. If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being
	 * reached to shut down the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	if (t->dst)
		dst_confirm(t->dst);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer.  */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}

/* Helper function to process the process SACK command.  */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		struct net *net = sock_net(asoc->base.sk);

		/* There are no more TSNs awaiting SACK.  */
		err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
			       struct sctp_association *asoc,
			       sctp_state_t state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			sk->sk_state = SCTP_SS_ESTABLISHED;

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED))
			sk->sk_shutdown |= RCV_SHUTDOWN;
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}
}

/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_unhash_established(asoc);
	sctp_association_free(asoc);
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use active path instead of primary path just
 * because primary path may be inactive).
 */
static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}

/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		sctp_ulpq_tail_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			sctp_chunkhdr_t *unk_chunk_hdr;

			unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Process variable FWDTSN chunk information. */
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
				    struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk) {
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
	}
}

/* Helper function to remove the association non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;
	struct list_head *temp;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					    asoc->c.sinit_num_ostreams,
					    asoc->c.sinit_max_instreams,
					    NULL, GFP_ATOMIC);
	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}
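
/* Helper to update the T1 (INIT/COOKIE-ECHO) timer on a retransmission:
 * bump the init error counter and, once every transport has been tried at
 * the current timeout, double the timeout (capped at max_init_timeo) and
 * advance the init cycle.
 */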
static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				    sctp_event_timeout_t timer,
				    char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}

}

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static int sctp_cmd_send_msg(struct sctp_association *asoc,
				struct sctp_datamsg *msg)
{
	struct sctp_chunk *chunk;
	int error = 0;

	list_for_each_entry(chunk, &msg->chunks, frag_list) {
		error = sctp_outq_tail(&asoc->outqueue, chunk);
		if (error)
			break;
	}

	return error;
}

/* Send the next ASCONF packet currently stored in the association.
 * This happens after the ASCONF_ACK was successfully processed.
 */
static void sctp_cmd_send_asconf(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);

	/* Send the next asconf chunk from the addip chunk
	 * queue.
	 */
	if (!list_empty(&asoc->addip_chunk_list)) {
		struct list_head *entry = asoc->addip_chunk_list.next;
		struct sctp_chunk *asconf = list_entry(entry,
						 struct sctp_chunk, list);
		list_del_init(entry);

		/* Hold the chunk until an ASCONF_ACK is received. */
		sctp_chunk_hold(asconf);
		if (sctp_primitive_ASCONF(net, asoc, asconf))
			sctp_chunk_free(asconf);
		else
			asoc->addip_last_asconf = asconf;
	}
}

/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
	       sctp_state_t state,
	       struct sctp_endpoint *ep,
	       struct sctp_association *asoc,
	       void *event_arg,
	       gfp_t gfp)
{
	sctp_cmd_seq_t commands;
	const sctp_sm_table_entry_t *state_fn;
	sctp_disposition_t status;
	int error = 0;
	typedef const char *(printfn_t)(sctp_subtype_t);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     sctp_state_t state,
			     struct sctp_endpoint *ep,
			     struct sctp_association *asoc,
			     void *event_arg,
			     sctp_disposition_t status,
			     sctp_cmd_seq_t *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
		/* This should now be a command. */
		break;

	case SCTP_DISPOSITION_CONSUME:
	case SCTP_DISPOSITION_ABORT:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter.  */
static int sctp_cmd_interpreter(sctp_event_t event_type,
				sctp_subtype_t subtype,
				sctp_state_t state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				sctp_disposition_t status,
				sctp_cmd_seq_t *commands,
				gfp_t gfp)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
	struct sctp_chunk *new_obj;
	struct sctp_chunk *chunk = NULL;
	struct sctp_packet *packet;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	struct sctp_sackhdr sackh;
	int local_cork = 0;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *	   cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}

			/* Register with the endpoint.  */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			sctp_hash_established(asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_assoc_update(asoc, cmd->obj.asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}
			/* Delete the current association.  */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state.  */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN.  */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			/* Move the Cumulative TSN Ack ahead. */
			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);

			/* purge the fragmentation queue */
			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);

			/* Abort any in progress partial delivery. */
			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK.  */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk.  */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk.  */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enabling COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios, the following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk.  */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer.  */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer.  */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If a caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer.  */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer.  */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_T1_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T1_RTX);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing.   */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing.  */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_TIMER_START_ONCE:
			timer = &asoc->timers[cmd->obj.to];

			if (timer_pending(timer))
				break;
			/* fall through */

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			BUG_ON(!timeout);

			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
			chunk = cmd->obj.chunk;
			t = sctp_assoc_choose_alter_transport(asoc,
						asoc->init_last_sent_to);
			asoc->init_last_sent_to = t;
			chunk->transport = t;
			t->init_sent_count++;
			/* Set the new transport as primary */
			sctp_assoc_set_primary(asoc, t);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_INIT,
						 "INIT");

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			break;

		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_COOKIE,
						 "COOKIE");

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				sctp_retransmit_mark(&asoc->outqueue, t,
					    SCTP_RTXR_T1_RTX);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.err);
			break;

		case SCTP_CMD_INIT_COUNTER_INC:
			asoc->init_err_counter++;
			break;

		case SCTP_CMD_INIT_COUNTER_RESET:
			asoc->init_err_counter = 0;
			asoc->init_cycle = 0;
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->init_sent_count = 0;
			}
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			pr_debug("%s: vtag mismatch!\n", __func__);
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport.  */
			sctp_do_8_2_transport_strike(commands, asoc,
						    cmd->obj.transport, 0);
			break;

		case SCTP_CMD_TRANSPORT_IDLE:
			t = cmd->obj.transport;
			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
			break;

		case SCTP_CMD_TRANSPORT_HB_SENT:
			t = cmd->obj.transport;
			sctp_do_8_2_transport_strike(commands, asoc,
						     t, 1);
			t->hb_sent = 1;
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_cmd_hb_timer_update(commands, t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.be32;
			sackh.a_rwnd = asoc->peer.rwnd +
					asoc->outqueue.outstanding_bytes;
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			chunk->subh.sack_hdr = &sackh;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_CHUNK(chunk));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.
			 * Uncork the queue since there might be
			 * responses pending
			 */
			chunk->pdiscard = 1;
			if (asoc) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
					 GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		case SCTP_CMD_CLEAR_INIT_TAG:
			asoc->peer.i.init_tag = 0;
			break;
		case SCTP_CMD_DEL_NON_PRIMARY:
			sctp_cmd_del_non_primary(asoc);
			break;
		case SCTP_CMD_T3_RTX_TIMERS_STOP:
			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
			break;
		case SCTP_CMD_FORCE_PRIM_RETRAN:
			t = asoc->peer.retran_path;
			asoc->peer.retran_path = asoc->peer.primary_path;
			error = sctp_outq_uncork(&asoc->outqueue);
			local_cork = 0;
			asoc->peer.retran_path = t;
			break;
		case SCTP_CMD_SET_SK_ERR:
			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
			break;
		case SCTP_CMD_ASSOC_CHANGE:
			sctp_cmd_assoc_change(commands, asoc,
					      cmd->obj.u8);
			break;
		case SCTP_CMD_ADAPTATION_IND:
			sctp_cmd_adaptation_ind(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_SHKEY:
			error = sctp_auth_asoc_init_active_key(asoc,
						GFP_ATOMIC);
			break;
		case SCTP_CMD_UPDATE_INITTAG:
			asoc->peer.i.init_tag = cmd->obj.u32;
			break;
		case SCTP_CMD_SEND_MSG:
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
			break;
		case SCTP_CMD_SEND_NEXT_ASCONF:
			sctp_cmd_send_asconf(asoc);
			break;
		case SCTP_CMD_PURGE_ASCONF_QUEUE:
			sctp_asconf_queue_teardown(asoc);
			break;

		case SCTP_CMD_SET_ASOC:
			asoc = cmd->obj.asoc;
			break;

		default:
			pr_warn("Impossible command: %u\n",
				cmd->verb);
			break;
		}

		if (error)
			break;
	}

out:
	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
	 */
	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
		if (chunk->end_of_packet || chunk->singleton)
			error = sctp_outq_uncork(&asoc->outqueue);
	} else if (local_cork)
		error = sctp_outq_uncork(&asoc->outqueue);
	return error;
nomem:
	error = -ENOMEM;
	goto out;
}