/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
						       const struct sctp_endpoint *ep,
						       const struct sock *sk,
						       sctp_scope_t scope, gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	sctp_paramhdr_t *p;
	int i;
	int err;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here. */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;

	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields. */
	atomic_set(&asoc->base.refcnt, 1);

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		setup_timer(&asoc->timers[i], sctp_timer_events[i],
			    (unsigned long)asoc);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf / 2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf / 2;
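
	/* For example, a socket with a 64 KB receive buffer starts with an
	 * rwnd of 32768 bytes, while very small buffers are rounded up to
	 * SCTP_DEFAULT_MINWINDOW.
	 */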

	asoc->a_rwnd = asoc->rwnd;

	/* Use my own max window until I learn something better. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk.  Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* Assume that the peer will tell us if he recognizes ASCONF
	 * as part of INIT exchange.
	 * The sctp_addip_noauth option is there for backward compatibility
	 * and will revert old behavior.
	 */
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;

	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
	if (err)
		goto fail_init;

	asoc->active_key_id = ep->active_key_id;

	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
		       ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
		       ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (sctp_paramhdr_t *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p + 1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

fail_init:
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					      const struct sock *sk,
					      sctp_scope_t scope, gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = kzalloc(sizeof(*asoc), gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(assoc);

	pr_debug("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}

/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free ssnmap storage. */
	sctp_ssnmap_free(asoc->ssnmap);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them?  To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_unhash_transport(transport);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}
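
/* Note: sctp_association_free() only drops the initial reference taken in
 * sctp_association_init(); the structure itself is released by
 * sctp_association_destroy() once the last sctp_association_put() runs.
 */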

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	if (unlikely(!asoc->base.dead)) {
		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
		return;
	}

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	kfree(asoc);
	SCTP_DBG_OBJCNT_DEC(assoc);
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* it's a changeover only if we already have a primary path
	 * that we are changing
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head *pos;
	struct sctp_transport *transport;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);
	/* Remove this peer from the transport hashtable */
	sctp_unhash_transport(peer);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
				    transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
				      &active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
				       jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}

/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet. */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call.  Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	sctp_transport_route(peer, NULL, sp);

	/* Initialize the pmtu of the transport. */
	if (peer->param_flags & SPP_PMTUD_DISABLE) {
		if (asoc->pathmtu)
			peer->pathmtu = asoc->pathmtu;
		else
			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;

	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
		 asoc->pathmtu);

	peer->pmtu_pending = 0;

	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *      min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
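
	/* With a common 1500-byte path MTU this evaluates to
	 * min(6000, max(3000, 4380)) = 4380 bytes.
	 */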

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Attach the remote transport to our asoc. */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;
	/* Add this peer into the transport hashtable */
	sctp_hash_transport(peer);

	/* If we do not yet have a primary path, set one. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head *pos;
	struct list_head *temp;
	struct sctp_transport *transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do book keeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport *temp;
	struct sctp_transport *t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
{
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport. */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1 MTU, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = asoc->pathmtu;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			dst_release(transport->dst);
			transport->dst = NULL;
			ulp_notify = false;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
	 * to the user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);

		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			sctp_ulpq_tail_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */
	sctp_select_active_and_retran_path(asoc);
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2*32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;

	asoc->next_tsn++;

	return retval;
}

/* Compare two addresses to see if they match.  Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	if (!asoc->need_ecne)
		return NULL;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list.  Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 */
	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			    transmitted_list) {
		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {
		if (transport == active)
			continue;
		list_for_each_entry(chunk, &transport->transmitted,
				    transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}

/* Is this the association we are looking for? */
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
					   struct net *net,
					   const union sctp_addr *laddr,
					   const union sctp_addr *paddr)
{
	struct sctp_transport *transport;

	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
	    net_eq(sock_net(asoc->base.sk), net)) {
		transport = sctp_assoc_lookup_paddr(asoc, paddr);
		if (!transport)
			goto out;

		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
					 sctp_sk(asoc->base.sk)))
			goto out;
	}
	transport = NULL;

out:
	return transport;
}

/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
void sctp_assoc_update(struct sctp_association *asoc,
		       struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.auth_capable = new->peer.auth_capable;
	asoc->peer.i = new->peer.i;
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 asoc->peer.i.initial_tsn, GFP_ATOMIC);

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_ssnmap_clear(asoc->ssnmap);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;
	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				    transports) {
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
				sctp_assoc_add_peer(asoc, &trans->ipaddr,
						    GFP_ATOMIC, trans->state);
		}

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
		if (!asoc->ssnmap) {
			/* Move the ssnmap. */
			asoc->ssnmap = new->ssnmap;
			new->ssnmap = NULL;
		}

		if (!asoc->assoc_id) {
			/* get a new association id since we don't have one
			 * yet.
			 */
			sctp_assoc_set_id(asoc, GFP_ATOMIC);
		}
	}

	/* SCTP-AUTH: Save the peer parameters from the new associations
	 * and also move the association shared keys over
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
 *
 *   When there is outbound data to send and the primary path
 *   becomes inactive (e.g., due to failures), or where the
 *   SCTP user explicitly requests to send data to an
 *   inactive destination transport address, before reporting
 *   an error to its ULP, the SCTP endpoint should try to send
 *   the data to an alternate active destination transport
 *   address if one exists.
 *
 *   When retransmitting data that timed out, if the endpoint
 *   is multihomed, it should consider each source-destination
 *   address pair in its retransmission selection policy.
 *   When retransmitting timed-out data, the endpoint should
 *   attempt to pick the most divergent source-destination
 *   pair from the original source-destination pair to which
 *   the packet was transmitted.
 *
 *   Note: Rules for picking the most divergent source-destination
 *   pair are an implementation decision and are not specified
 *   within this document.
 *
 * Our basic strategy is to round-robin transports in priorities
 * according to sctp_trans_score() e.g., if no such
 * transport with state SCTP_ACTIVE exists, round-robin through
 * SCTP_UNKNOWN, etc. You get the picture.
 */
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
	switch (trans->state) {
	case SCTP_ACTIVE:
		return 3;	/* best case */
	case SCTP_UNKNOWN:
		return 2;
	case SCTP_PF:
		return 1;
	default: /* case SCTP_INACTIVE */
		return 0;	/* worst case */
	}
}

static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
						   struct sctp_transport *trans2)
{
	if (trans1->error_count > trans2->error_count) {
		return trans2;
	} else if (trans1->error_count == trans2->error_count &&
		   ktime_after(trans2->last_time_heard,
			       trans1->last_time_heard)) {
		return trans2;
	} else {
		return trans1;
	}
}

static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
						    struct sctp_transport *best)
{
	u8 score_curr, score_best;

	if (best == NULL || curr == best)
		return curr;

	score_curr = sctp_trans_score(curr);
	score_best = sctp_trans_score(best);

	/* First, try a score-based selection if both transport states
	 * differ. If we're in a tie, lets try to make a more clever
	 * decision here based on error counts and last time heard.
	 */
	if (score_curr > score_best)
		return curr;
	else if (score_curr == score_best)
		return sctp_trans_elect_tie(best, curr);
	else
		return best;
}

void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans = asoc->peer.retran_path;
	struct sctp_transport *trans_next = NULL;

	/* We're done as we only have the one and only path. */
	if (asoc->peer.transport_count == 1)
		return;
	/* If active_path and retran_path are the same and active,
	 * then this is the only active path. Use it.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    asoc->peer.active_path->state == SCTP_ACTIVE)
		return;

	/* Iterate from retran_path's successor back to retran_path. */
	for (trans = list_next_entry(trans, transports); 1;
	     trans = list_next_entry(trans, transports)) {
		/* Manually skip the head element. */
		if (&trans->transports == &asoc->peer.transport_addr_list)
			continue;
		if (trans->state == SCTP_UNCONFIRMED)
			continue;
		trans_next = sctp_trans_elect_best(trans, trans_next);
		/* Active is good enough for immediate return. */
		if (trans_next->state == SCTP_ACTIVE)
			break;
		/* We've reached the end, time to update path. */
		if (trans == asoc->peer.retran_path)
			break;
	}

	asoc->peer.retran_path = trans_next;

	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}
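
/* Re-evaluate which transports to use as the active and retransmission
 * paths: prefer the two most recently heard-from usable transports, with
 * the configured primary path taking precedence whenever it is usable.
 */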
static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
	struct sctp_transport *trans_pf = NULL;

	/* Look for the two most recently used active transports. */
	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		/* Skip uninteresting transports. */
		if (trans->state == SCTP_INACTIVE ||
		    trans->state == SCTP_UNCONFIRMED)
			continue;
		/* Keep track of the best PF transport from our
		 * list in case we don't find an active one.
		 */
		if (trans->state == SCTP_PF) {
			trans_pf = sctp_trans_elect_best(trans, trans_pf);
			continue;
		}
		/* For active transports, pick the most recent ones. */
		if (trans_pri == NULL ||
		    ktime_after(trans->last_time_heard,
				trans_pri->last_time_heard)) {
			trans_sec = trans_pri;
			trans_pri = trans;
		} else if (trans_sec == NULL ||
			   ktime_after(trans->last_time_heard,
				       trans_sec->last_time_heard)) {
			trans_sec = trans;
		}
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the primary
	 * path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source transport
	 * address) to use. [If the primary is active but not most recent,
	 * bump the most recently used transport.]
	 */
	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
	     asoc->peer.primary_path != trans_pri) {
		trans_sec = trans_pri;
		trans_pri = asoc->peer.primary_path;
	}

	/* We did not find anything useful for a possible retransmission
	 * path; either the primary path that we found is the same as
	 * the current one, or we didn't generally find an active one.
	 */
	if (trans_sec == NULL)
		trans_sec = trans_pri;

	/* If we failed to find a usable transport, just camp on the
	 * active or pick a PF iff it's the better choice.
	 */
	if (trans_pri == NULL) {
		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
		trans_sec = trans_pri;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = trans_pri;
	asoc->peer.retran_path = trans_sec;
}
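
/* Pick the destination transport for the next (re)transmission, rotating
 * the retransmission path when the previous send already used it.
 */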
struct sctp_transport *
sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
				  struct sctp_transport *last_sent_to)
{
	/* If this is the first time packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (last_sent_to == NULL) {
		return asoc->peer.active_path;
	} else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);

		return asoc->peer.retran_path;
	}
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(sk, t,
						   WORD_TRUNC(dst_mtu(t->dst)));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	if (pmtu) {
		asoc->pathmtu = pmtu;
		asoc->frag_point = sctp_frag_point(asoc, pmtu);
	}

	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
		 asoc->pathmtu, asoc->frag_point);
}

/* Should we send a SACK to update our peer? */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return true;
		break;
	default:
		break;
	}
	return false;
}

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);
		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

		/* Stop the SACK timer. */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * potential pressure overflow so that the window can be restored
	 * back to original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over = len - asoc->rwnd;
		asoc->rwnd = 0;
	}

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     sctp_scope_t scope, gfp_t gfp)
{
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfpflags_allow_blocking(gfp);
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0 is not a valid assoc_id, must be >= 1 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (sctp_chunk_pending(ack))
			continue;
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}