/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(
					struct sctp_association *asoc,
					const struct sctp_endpoint *ep,
					const struct sock *sk,
					enum sctp_scope scope, gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_paramhdr *p;
	int i;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here. */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;

	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields. */
	refcount_set(&asoc->base.refcnt, 1);

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	asoc->flowlabel = sp->flowlabel;
	asoc->dscp = sp->dscp;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
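
	/* With the RFC 4960 defaults (RTO.Initial = 3 s, RTO.Max = 60 s),
	 * the T1/T2 timers above start at 3 s and the T5 shutdown guard
	 * fires after 5 * 60 s = 300 s.
	 */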

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;
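
	/* For example, a 200 KB sk_rcvbuf yields an initial rwnd of 100 KB,
	 * while any receive buffer below 3000 bytes is clamped up to the
	 * 1500 byte minimum window required above.
	 */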

	/* Use my own max window until I learn something better. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
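
	/* These ack points trail next_tsn by one in serial number
	 * arithmetic: an initial TSN of 0 leaves them at 0xffffffff,
	 * meaning "everything before the first TSN is already acked".
	 */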

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;
	asoc->strreset_outseq = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk.  Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* Assume that the peer will tell us if he recognizes ASCONF
	 * as part of INIT exchange.
	 * The sctp_addip_noauth option is there for backward compatibility
	 * and will revert old behavior.
	 */
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;

	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
			     0, gfp))
		goto fail_init;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;
	sctp_assoc_update_frag_point(asoc);

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
		goto stream_free;

	asoc->active_key_id = ep->active_key_id;

	asoc->prsctp_enable = ep->prsctp_enable;
	asoc->reconf_enable = ep->reconf_enable;
	asoc->strreset_enable = ep->strreset_enable;

	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (struct sctp_paramhdr *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
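
	/* The RANDOM parameter is the 4 byte paramhdr directly followed by
	 * SCTP_AUTH_RANDOM_LENGTH (32) random octets, so the length field
	 * above works out to htons(36), per RFC 4895.
	 */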

	return asoc;

stream_free:
	sctp_stream_free(&asoc->stream);
fail_init:
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					      const struct sock *sk,
					      enum sctp_scope scope, gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = kzalloc(sizeof(*asoc), gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(assoc);

	pr_debug("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}

/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free stream information. */
	sctp_stream_free(&asoc->stream);

	if (asoc->strreset_chunk)
		sctp_chunk_free(asoc->strreset_chunk);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);
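
	/* Each timer that is still pending holds a reference on the
	 * association, so a successful del_timer() below must drop the
	 * reference the expired handler would otherwise have released.
	 */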

	/* Do we need to go through all of our timers and
	 * delete them?  To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_unhash_transport(transport);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	if (unlikely(!asoc->base.dead)) {
		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
		return;
	}

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	kfree_rcu(asoc, rcu);
	SCTP_DBG_OBJCNT_DEC(assoc);
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* it's a changeover only if we already have a primary path
	 * that we are changing
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct sctp_transport *transport;
	struct list_head *pos;
	struct sctp_chunk *ch;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);
	/* Remove this peer from the transport hashtable */
	sctp_unhash_transport(peer);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	if (asoc->strreset_chunk &&
	    asoc->strreset_chunk->transport == peer) {
		asoc->strreset_chunk->transport = transport;
		sctp_transport_reset_reconf_timer(transport);
	}

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport a SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
				    transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
				      &active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
		if (ch->transport == peer)
			ch->transport = NULL;

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}

/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet. */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call.  Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	if (addr->sa.sa_family == AF_INET6) {
		__be32 info = addr->v6.sin6_flowinfo;

		if (info) {
			peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
			peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
		} else {
			peer->flowlabel = asoc->flowlabel;
		}
	}
	peer->dscp = asoc->dscp;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	/* Initialize the pmtu of the transport. */
	sctp_transport_route(peer, NULL, sp);

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
				  min_t(int, peer->pathmtu, asoc->pathmtu) :
				  peer->pathmtu);

	peer->pmtu_pending = 0;

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *      min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
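
	/* Worked out: a 1500 byte PMTU gives min(6000, max(3000, 4380)) =
	 * 4380 bytes (about three packets), while a 9000 byte jumbo PMTU
	 * gives min(36000, max(18000, 4380)) = 18000 bytes, i.e. 2*MTU.
	 */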

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Add this peer into the transport hashtable */
	if (sctp_hash_transport(peer)) {
		sctp_transport_free(peer);
		return NULL;
	}

	/* Attach the remote transport to our asoc. */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* If we do not yet have a primary path, set one. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head *pos;
	struct list_head *temp;
	struct sctp_transport *transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do book keeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport *temp;
	struct sctp_transport *t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  enum sctp_transport_cmd command,
				  sctp_sn_error_t error)
{
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport. */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1 MTU, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = asoc->pathmtu;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			sctp_transport_dst_release(transport);
			ulp_notify = false;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
	 * to the user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);

		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */
	sctp_select_active_and_retran_path(asoc);
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	refcount_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (refcount_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;

	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}
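
/* Example of the wraparound: after handing out TSN 0xffffffff, next_tsn
 * becomes 0, which is its valid successor in serial number arithmetic.
 */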

/* Compare two addresses to see if they match.  Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	if (!asoc->need_ecne)
		return NULL;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list.  Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			    transmitted_list) {
		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {
		if (transport == active)
			continue;
		list_for_each_entry(chunk, &transport->transmitted,
				    transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}

/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	union sctp_subtype subtype;
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int first_time = 1;	/* is this the first time through the loop */
	int error = 0;
	int state;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* If the first chunk in the packet is AUTH, do special
		 * processing specified in Section 6.3 of SCTP-AUTH spec
		 */
		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
			struct sctp_chunkhdr *next_hdr;

			next_hdr = sctp_inq_peek(inqueue);
			if (!next_hdr)
				goto normal;

			/* If the next chunk is COOKIE-ECHO, skip the AUTH
			 * chunk while saving a pointer to it so we can do
			 * Authentication later (during cookie-echo
			 * processing).
			 */
			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
				chunk->auth_chunk = skb_clone(chunk->skb,
							      GFP_ATOMIC);
				chunk->auth = 1;
				continue;
			}
		}

normal:
		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;

		if (first_time)
			first_time = 0;
	}
	sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
int sctp_assoc_update(struct sctp_association *asoc,
		      struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.auth_capable = new->peer.auth_capable;
	asoc->peer.i = new->peer.i;

	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			      asoc->peer.i.initial_tsn, GFP_ATOMIC))
		return -ENOMEM;

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_stream_clear(&asoc->stream);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				    transports)
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
			    !sctp_assoc_add_peer(asoc, &trans->ipaddr,
						 GFP_ATOMIC, trans->state))
				return -ENOMEM;

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;

		if (sctp_state(asoc, COOKIE_WAIT))
			sctp_stream_update(&asoc->stream, &new->stream);

		/* get a new assoc id if we don't have one yet. */
		if (sctp_assoc_set_id(asoc, GFP_ATOMIC))
			return -ENOMEM;
	}

	/* SCTP-AUTH: Save the peer parameters from the new associations
	 * and also move the association shared keys over
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
 *
 *   When there is outbound data to send and the primary path
 *   becomes inactive (e.g., due to failures), or where the
 *   SCTP user explicitly requests to send data to an
 *   inactive destination transport address, before reporting
 *   an error to its ULP, the SCTP endpoint should try to send
 *   the data to an alternate active destination transport
 *   address if one exists.
 *
 *   When retransmitting data that timed out, if the endpoint
 *   is multihomed, it should consider each source-destination
 *   address pair in its retransmission selection policy.
 *   When retransmitting timed-out data, the endpoint should
 *   attempt to pick the most divergent source-destination
 *   pair from the original source-destination pair to which
 *   the packet was transmitted.
 *
 *   Note: Rules for picking the most divergent source-destination
 *   pair are an implementation decision and are not specified
 *   within this document.
 *
 * Our basic strategy is to round-robin transports in priorities
 * according to sctp_trans_score() e.g., if no such
 * transport with state SCTP_ACTIVE exists, round-robin through
 * SCTP_UNKNOWN, etc. You get the picture.
 */
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
	switch (trans->state) {
	case SCTP_ACTIVE:
		return 3;	/* best case */
	case SCTP_UNKNOWN:
		return 2;
	case SCTP_PF:
		return 1;
	default: /* case SCTP_INACTIVE */
		return 0;	/* worst case */
	}
}

static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
						   struct sctp_transport *trans2)
{
	if (trans1->error_count > trans2->error_count) {
		return trans2;
	} else if (trans1->error_count == trans2->error_count &&
		   ktime_after(trans2->last_time_heard,
			       trans1->last_time_heard)) {
		return trans2;
	} else {
		return trans1;
	}
}

static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
						    struct sctp_transport *best)
{
	u8 score_curr, score_best;

	if (best == NULL || curr == best)
		return curr;

	score_curr = sctp_trans_score(curr);
	score_best = sctp_trans_score(best);

	/* First, try a score-based selection if both transport states
	 * differ. If we're in a tie, let's try to make a more clever
	 * decision here based on error counts and last time heard.
	 */
	if (score_curr > score_best)
		return curr;
	else if (score_curr == score_best)
		return sctp_trans_elect_tie(best, curr);
	else
		return best;
}

void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans = asoc->peer.retran_path;
	struct sctp_transport *trans_next = NULL;

	/* We're done as we only have the one and only path. */
	if (asoc->peer.transport_count == 1)
		return;
	/* If active_path and retran_path are the same and active,
	 * then this is the only active path. Use it.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    asoc->peer.active_path->state == SCTP_ACTIVE)
		return;

	/* Iterate from retran_path's successor back to retran_path. */
	for (trans = list_next_entry(trans, transports); 1;
	     trans = list_next_entry(trans, transports)) {
		/* Manually skip the head element. */
		if (&trans->transports == &asoc->peer.transport_addr_list)
			continue;
		if (trans->state == SCTP_UNCONFIRMED)
			continue;
		trans_next = sctp_trans_elect_best(trans, trans_next);
		/* Active is good enough for immediate return. */
		if (trans_next->state == SCTP_ACTIVE)
			break;
		/* We've reached the end, time to update path. */
		if (trans == asoc->peer.retran_path)
			break;
	}

	asoc->peer.retran_path = trans_next;
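
	/* Example: with transports A (old retran_path, INACTIVE), B (PF)
	 * and C (ACTIVE), the walk above starts at A's successor, keeps B
	 * as the provisional pick, then stops at C, since an ACTIVE
	 * transport wins immediately.
	 */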

	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}

static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
	struct sctp_transport *trans_pf = NULL;

	/* Look for the two most recently used active transports. */
	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		/* Skip uninteresting transports. */
		if (trans->state == SCTP_INACTIVE ||
		    trans->state == SCTP_UNCONFIRMED)
			continue;
		/* Keep track of the best PF transport from our
		 * list in case we don't find an active one.
		 */
		if (trans->state == SCTP_PF) {
			trans_pf = sctp_trans_elect_best(trans, trans_pf);
			continue;
		}
		/* For active transports, pick the most recent ones. */
		if (trans_pri == NULL ||
		    ktime_after(trans->last_time_heard,
				trans_pri->last_time_heard)) {
			trans_sec = trans_pri;
			trans_pri = trans;
		} else if (trans_sec == NULL ||
			   ktime_after(trans->last_time_heard,
				       trans_sec->last_time_heard)) {
			trans_sec = trans;
		}
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the primary
	 * path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source transport
	 * address) to use. [If the primary is active but not most recent,
	 * bump the most recently used transport.]
	 */
	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
	     asoc->peer.primary_path != trans_pri) {
		trans_sec = trans_pri;
		trans_pri = asoc->peer.primary_path;
	}

	/* We did not find anything useful for a possible retransmission
	 * path; either primary path that we found is the same as
	 * the current one, or we didn't generally find an active one.
	 */
	if (trans_sec == NULL)
		trans_sec = trans_pri;

	/* If we failed to find a usable transport, just camp on the
	 * active or pick a PF iff it's the better choice.
	 */
	if (trans_pri == NULL) {
		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
		trans_sec = trans_pri;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = trans_pri;
	asoc->peer.retran_path = trans_sec;
}

struct sctp_transport *
sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
				  struct sctp_transport *last_sent_to)
{
	/* If this is the first time a packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (last_sent_to == NULL) {
		return asoc->peer.active_path;
	} else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);

		return asoc->peer.retran_path;
	}
}

void sctp_assoc_update_frag_point(struct sctp_association *asoc)
{
	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
				    sctp_datachk_len(&asoc->stream));

	if (asoc->user_frag)
		frag = min_t(int, frag, asoc->user_frag);

	frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
				sctp_datachk_len(&asoc->stream));

	asoc->frag_point = SCTP_TRUNC4(frag);
}

void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
{
	if (asoc->pathmtu != pmtu) {
		asoc->pathmtu = pmtu;
		sctp_assoc_update_frag_point(asoc);
	}

	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
		 asoc->pathmtu, asoc->frag_point);
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(t,
						   atomic_read(&t->mtu_info));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	sctp_assoc_set_pmtu(asoc, pmtu);
}

/* Should we send a SACK to update our peer? */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return true;
		break;
	default:
		break;
	}
	return false;
}
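
/* With the default rwnd_update_shift of 4 and a 200 KB receive buffer,
 * the test above asks for roughly a 12.5 KB rwnd gain (or one PMTU,
 * whichever is larger) before a window update SACK is worthwhile.
 */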

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);

		asoc->rwnd += change;
		asoc->rwnd_press -= change;
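
		/* e.g. a 4000 byte pressure debt against a 1500 byte PMTU
		 * is repaid 1500 + 1500 + 1000 bytes over three successive
		 * increases.
		 */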
	}

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

		/* Stop the SACK timer. */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * potential pressure overflow so that the window can be restored
	 * back to original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over += len - asoc->rwnd;
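		/* e.g. rwnd = 1000 and len = 3000 leaves rwnd = 0 with
		 * rwnd_over = 2000 to be repaid by later increases.
		 */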
		asoc->rwnd = 0;
	}

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     enum sctp_scope scope, gfp_t gfp)
{
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfpflags_allow_blocking(gfp);
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0 is not a valid assoc_id, must be >= 1 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
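	/* Sleeping callers prefilled the idr via idr_preload() above, so the
	 * GFP_NOWAIT allocation under the spinlock is unlikely to fail;
	 * atomic callers get only the plain GFP_NOWAIT attempt.
	 */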
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;

	return 0;
}

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (sctp_chunk_pending(ack))
			continue;
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}