/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal     <narsi@refcode.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez  <inaky.gonzalez@intel.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
 *    Kevin Gao             <kevin.gao@intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/rhashtable.h>

#include <net/route.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			     struct sctp_association *assoc,
			     enum sctp_socket_type type);
static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}
/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	return asoc->ep->sndbuf_policy ? sk->sk_sndbuf - asoc->sndbuf_used
				       : sk_stream_wspace(sk);
}
/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	sctp_auth_shkey_hold(chunk->shkey);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	skb_shinfo(chunk->skb)->destructor_arg = chunk;

	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
	sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
	sk_mem_charge(sk, chunk->skb->truesize);
}
static void sctp_clear_owner_w(struct sctp_chunk *chunk)
{
	skb_orphan(chunk->skb);
}
static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
				       void (*cb)(struct sctp_chunk *))
{
	struct sctp_outq *q = &asoc->outqueue;
	struct sctp_transport *t;
	struct sctp_chunk *chunk;

	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
			cb(chunk);

	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->sacked, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->out_chunk_list, list)
		cb(chunk);
}
static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
				 void (*cb)(struct sk_buff *, struct sock *))
{
	struct sk_buff *skb, *tmp;

	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
		cb(skb, sk);
}
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}
/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);

		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (id <= SCTP_ALL_ASSOC)
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	if (asoc && (asoc->base.sk != sk || asoc->base.dead))
		asoc = NULL;
	spin_unlock_bh(&sctp_assocs_id_lock);

	return asoc;
}
/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
	union sctp_addr *laddr = (union sctp_addr *)addr;
	struct sctp_transport *transport;

	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
		return NULL;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr, &transport);
	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}
/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd      - the socket descriptor returned by socket().
 *   addr    - the address structure (struct sockaddr_in or struct
 *             sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
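/* Illustrative userspace sketch (not part of this file): binding a
 * one-to-many SCTP socket to a local address before use. The port value
 * below is a made-up example.
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 */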
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof (struct sockaddr))
		return NULL;

	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
		return NULL;

	if (addr->sa.sa_family == AF_INET6) {
		if (len < SIN6_LEN_RFC2133)
			return NULL;
		/* V4 mapped address are really of AF_INET family */
		if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
		    !opt->pf->af_supported(AF_INET, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}
/* Bind a local address either to an endpoint or to an association.  */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inhert an already bound port in this case
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr)))
		return -EADDRINUSE;

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
				 SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}
/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF. Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}
/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sockaddr *sa_addr;
	struct sctp_af *af;
	void *addr_buf = addrs;
	int retval = 0;
	int cnt;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			break;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);
		if (retval < 0)
			break;

		addr_buf += af->sockaddr_len;
	}

	if (retval < 0) {
		/* Failed. Cleanup the ones that have been added */
		sctp_bindx_rem(sk, addrs, cnt);
	}

	return retval;
}
/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    sizeof(saveaddr),
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				/* Clear the source and route cache */
				sctp_transport_route(trans, NULL,
						     sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}
/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	union sctp_addr *sa_addr;
	struct sctp_af *af;
	void *addr_buf = addrs;
	int retval = 0;
	int cnt;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			break;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			break;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			break;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			break;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);
		if (retval < 0)
			break;

		addr_buf += af->sockaddr_len;
	}

	if (retval < 0) {
		/* Failed. Add the ones that has been removed back */
		sctp_bindx_add(sk, addrs, cnt);
	}

	return retval;
}
/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc,
				 &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
					transports) {
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}
/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 *	SCTP_BINDX_ADD_ADDR
 *
 *	SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind().  Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with so that no new association accepted will be
 * associated with those addresses. If the endpoint supports dynamic
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
 * may cause the endpoint to send the appropriate message to the peer to
 * change the peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
 * from userspace.
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
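/* Illustrative userspace sketch (not part of this file): adding one more
 * local IPv4 address to an already bound socket with sctp_bindx() from
 * libsctp. The address and port are made up; the port must match the
 * port the socket is already bound to, as described above.
 *
 *	struct sockaddr_in extra = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.2", &extra.sin_addr);
 *	if (sctp_bindx(sd, (struct sockaddr *)&extra, 1,
 *		       SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");
 */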
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	kaddrs = vmemdup_user(addrs, addrs_size);
	if (unlikely(IS_ERR(kaddrs)))
		return PTR_ERR(kaddrs);

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		/* Allow security module to validate bindx addresses. */
		err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_BINDX_ADD,
						 (struct sockaddr *)kaddrs,
						 addrs_size);
		if (err)
			goto out;
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kvfree(kaddrs);

	return err;
}
/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size, int flags,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	enum sctp_scope scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		struct sctp_af *af;

		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/* If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port <
				    inet_prot_sock(net) &&
				    !ns_capable(net->user_ns,
				    CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0)
				goto out_free;
		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0)
		goto out_free;

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	sp->pf->to_sk_daddr(sa_addr, sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	if (assoc_id)
		*assoc_id = asoc->assoc_id;

	err = sctp_wait_for_connect(asoc, &timeo);
	/* Note: the asoc may be freed after the return of
	 * sctp_wait_for_connect.
	 */

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * To the hash table, try to unhash it, just in case, its a noop
		 * if it wasn't hashed so we're safe
		 */
		sctp_association_free(asoc);
	}

	return err;
}
/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *                   sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking sctp_connectx(). This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
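/* Illustrative userspace sketch (not part of this file): connecting to a
 * multi-homed peer with sctp_connectx() from libsctp, passing a packed
 * array of two made-up IPv4 addresses that share the same port.
 *
 *	struct sockaddr_in peers[2] = {
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *	};
 *	sctp_assoc_t aid;
 *
 *	inet_pton(AF_INET, "198.51.100.1", &peers[0].sin_addr);
 *	inet_pton(AF_INET, "198.51.100.2", &peers[1].sin_addr);
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &aid) < 0)
 *		perror("sctp_connectx");
 */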
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	struct sockaddr *kaddrs;
	int err = 0, flags = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	kaddrs = vmemdup_user(addrs, addrs_size);
	if (unlikely(IS_ERR(kaddrs)))
		return PTR_ERR(kaddrs);

	/* Allow security module to validate connectx addresses. */
	err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX,
					 (struct sockaddr *)kaddrs,
					 addrs_size);
	if (err)
		goto out_free;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		flags = sk->sk_socket->file->f_flags;

	err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);

out_free:
	kvfree(kaddrs);

	return err;
}
/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}
/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call.  Error is always negative and association id
 * is always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}
/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that user-space library
 * can avoid any unnecessary allocations. The only different part
 * is that we store the actual length of the address buffer into the
 * addrs_num structure member. That way we can re-use the existing
 * code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}
/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 *   int close(int sd);
 *
 *   sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 *  };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
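/* Illustrative userspace sketch (not part of this file): arranging for
 * close() on a TCP-style SCTP socket to send an ABORT instead of a
 * graceful SHUTDOWN, per the SO_LINGER semantics described above.
 *
 *	struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *
 *	if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin)) < 0)
 *		perror("setsockopt(SO_LINGER)");
 *	close(sd);	// with this linger setting, close() aborts
 */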
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	sk->sk_shutdown = SHUTDOWN_MASK;
	inet_sk_set_state(sk, SCTP_SS_CLOSING);

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	bh_lock_sock_nested(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}
/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}
/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */
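/* Illustrative userspace sketch (not part of this file): sending one
 * message on a one-to-many socket with sendmsg(), attaching an
 * SCTP_SNDRCV ancillary block to select stream 1. peer_addr is assumed
 * to be an already filled-in struct sockaddr_in; all values are made up.
 *
 *	char buf[] = "hello";
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))] = { 0 };
 *	struct msghdr mh = {
 *		.msg_name       = &peer_addr,
 *		.msg_namelen    = sizeof(peer_addr),
 *		.msg_iov        = &iov,
 *		.msg_iovlen     = 1,
 *		.msg_control    = cbuf,
 *		.msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&mh);
 *	struct sctp_sndrcvinfo *si;
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type  = SCTP_SNDRCV;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(*si));
 *	si = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	memset(si, 0, sizeof(*si));
 *	si->sinfo_stream = 1;
 *	if (sendmsg(sd, &mh, 0) < 0)
 *		perror("sendmsg");
 */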
static int sctp_msghdr_parse(const struct msghdr *msg,
			     struct sctp_cmsgs *cmsgs);
static int sctp_sendmsg_parse(struct sock *sk, struct sctp_cmsgs *cmsgs,
			      struct sctp_sndrcvinfo *srinfo,
			      const struct msghdr *msg, size_t msg_len)
{
	__u16 sflags;
	int err;

	if (sctp_sstate(sk, LISTENING) && sctp_style(sk, TCP))
		return -EPIPE;

	if (msg_len > sk->sk_sndbuf)
		return -EMSGSIZE;

	memset(cmsgs, 0, sizeof(*cmsgs));
	err = sctp_msghdr_parse(msg, cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		return err;
	}

	memset(srinfo, 0, sizeof(*srinfo));
	if (cmsgs->srinfo) {
		srinfo->sinfo_stream = cmsgs->srinfo->sinfo_stream;
		srinfo->sinfo_flags = cmsgs->srinfo->sinfo_flags;
		srinfo->sinfo_ppid = cmsgs->srinfo->sinfo_ppid;
		srinfo->sinfo_context = cmsgs->srinfo->sinfo_context;
		srinfo->sinfo_assoc_id = cmsgs->srinfo->sinfo_assoc_id;
		srinfo->sinfo_timetolive = cmsgs->srinfo->sinfo_timetolive;
	}

	if (cmsgs->sinfo) {
		srinfo->sinfo_stream = cmsgs->sinfo->snd_sid;
		srinfo->sinfo_flags = cmsgs->sinfo->snd_flags;
		srinfo->sinfo_ppid = cmsgs->sinfo->snd_ppid;
		srinfo->sinfo_context = cmsgs->sinfo->snd_context;
		srinfo->sinfo_assoc_id = cmsgs->sinfo->snd_assoc_id;
	}

	if (cmsgs->prinfo) {
		srinfo->sinfo_timetolive = cmsgs->prinfo->pr_value;
		SCTP_PR_SET_POLICY(srinfo->sinfo_flags,
				   cmsgs->prinfo->pr_policy);
	}

	sflags = srinfo->sinfo_flags;
	if (!sflags && msg_len)
		return 0;

	if (sctp_style(sk, TCP) && (sflags & (SCTP_EOF | SCTP_ABORT)))
		return -EINVAL;

	if (((sflags & SCTP_EOF) && msg_len > 0) ||
	    (!(sflags & (SCTP_EOF | SCTP_ABORT)) && msg_len == 0))
		return -EINVAL;

	if ((sflags & SCTP_ADDR_OVER) && !msg->msg_name)
		return -EINVAL;

	return 0;
}
static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
				 struct sctp_cmsgs *cmsgs,
				 union sctp_addr *daddr,
				 struct sctp_transport **tp)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct net *net = sock_net(sk);
	struct sctp_association *asoc;
	enum sctp_scope scope;
	struct cmsghdr *cmsg;
	__be32 flowinfo = 0;
	struct sctp_af *af;
	int err;

	if (sflags & (SCTP_EOF | SCTP_ABORT))
		return -EINVAL;

	if (sctp_style(sk, TCP) && (sctp_sstate(sk, ESTABLISHED) ||
				    sctp_sstate(sk, CLOSING)))
		return -EADDRNOTAVAIL;

	if (sctp_endpoint_is_peeled_off(ep, daddr))
		return -EADDRNOTAVAIL;

	if (!ep->base.bind_addr.port) {
		if (sctp_autobind(sk))
			return -EAGAIN;
	} else {
		if (ep->base.bind_addr.port < inet_prot_sock(net) &&
		    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
			return -EACCES;
	}

	scope = sctp_scope(daddr);

	/* Label connection socket for first association 1-to-many
	 * style for client sequence socket()->sendmsg(). This
	 * needs to be done before sctp_assoc_add_peer() as that will
	 * set up the initial packet that needs to account for any
	 * security ip options (CIPSO/CALIPSO) added to the packet.
	 */
	af = sctp_get_af_specific(daddr->sa.sa_family);
	if (!af)
		return -EINVAL;
	err = security_sctp_bind_connect(sk, SCTP_SENDMSG_CONNECT,
					 (struct sockaddr *)daddr,
					 af->sockaddr_len);
	if (err < 0)
		return err;

	asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
	if (!asoc)
		return -ENOMEM;

	if (sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL) < 0) {
		err = -ENOMEM;
		goto free;
	}

	if (cmsgs->init) {
		struct sctp_initmsg *init = cmsgs->init;

		if (init->sinit_num_ostreams) {
			__u16 outcnt = init->sinit_num_ostreams;

			asoc->c.sinit_num_ostreams = outcnt;
			/* outcnt has been changed, need to re-init stream */
			err = sctp_stream_init(&asoc->stream, outcnt, 0,
					       GFP_KERNEL);
			if (err)
				goto free;
		}

		if (init->sinit_max_instreams)
			asoc->c.sinit_max_instreams = init->sinit_max_instreams;

		if (init->sinit_max_attempts)
			asoc->max_init_attempts = init->sinit_max_attempts;

		if (init->sinit_max_init_timeo)
			asoc->max_init_timeo =
				msecs_to_jiffies(init->sinit_max_init_timeo);
	}

	*tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN);
	if (!*tp) {
		err = -ENOMEM;
		goto free;
	}

	if (!cmsgs->addrs_msg)
		return 0;

	if (daddr->sa.sa_family == AF_INET6)
		flowinfo = daddr->v6.sin6_flowinfo;

	/* sendv addr list parse */
	for_each_cmsghdr(cmsg, cmsgs->addrs_msg) {
		struct sctp_transport *transport;
		struct sctp_association *old;
		union sctp_addr _daddr;
		int dlen;

		if (cmsg->cmsg_level != IPPROTO_SCTP ||
		    (cmsg->cmsg_type != SCTP_DSTADDRV4 &&
		     cmsg->cmsg_type != SCTP_DSTADDRV6))
			continue;

		daddr = &_daddr;
		memset(daddr, 0, sizeof(*daddr));
		dlen = cmsg->cmsg_len - sizeof(struct cmsghdr);
		if (cmsg->cmsg_type == SCTP_DSTADDRV4) {
			if (dlen < sizeof(struct in_addr)) {
				err = -EINVAL;
				goto free;
			}

			dlen = sizeof(struct in_addr);
			daddr->v4.sin_family = AF_INET;
			daddr->v4.sin_port = htons(asoc->peer.port);
			memcpy(&daddr->v4.sin_addr, CMSG_DATA(cmsg), dlen);
		} else {
			if (dlen < sizeof(struct in6_addr)) {
				err = -EINVAL;
				goto free;
			}

			dlen = sizeof(struct in6_addr);
			daddr->v6.sin6_flowinfo = flowinfo;
			daddr->v6.sin6_family = AF_INET6;
			daddr->v6.sin6_port = htons(asoc->peer.port);
			memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen);
		}

		err = sctp_verify_addr(sk, daddr, sizeof(*daddr));
		if (err)
			goto free;

		old = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
		if (old && old != asoc) {
			if (old->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto free;
		}

		if (sctp_endpoint_is_peeled_off(ep, daddr)) {
			err = -EADDRNOTAVAIL;
			goto free;
		}

		transport = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto free;
		}
	}

	return 0;

free:
	sctp_association_free(asoc);
	return err;
}
static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,
				     __u16 sflags, struct msghdr *msg,
				     size_t msg_len)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP))
		return -EPIPE;

	if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP) &&
	    !sctp_state(asoc, ESTABLISHED))
		return 0;

	if (sflags & SCTP_EOF) {
		pr_debug("%s: shutting down association:%p\n", __func__, asoc);
		sctp_primitive_SHUTDOWN(net, asoc, NULL);

		return 0;
	}

	if (sflags & SCTP_ABORT) {
		struct sctp_chunk *chunk;

		chunk = sctp_make_abort_user(asoc, msg, msg_len);
		if (!chunk)
			return -ENOMEM;

		pr_debug("%s: aborting association:%p\n", __func__, asoc);
		sctp_primitive_ABORT(net, asoc, chunk);
		iov_iter_revert(&msg->msg_iter, msg_len);

		return 0;
	}

	return 1;
}
static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
				struct msghdr *msg, size_t msg_len,
				struct sctp_transport *transport,
				struct sctp_sndrcvinfo *sinfo)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct net *net = sock_net(sk);
	struct sctp_datamsg *datamsg;
	bool wait_connect = false;
	struct sctp_chunk *chunk;
	long timeo;
	int err;

	if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
		err = -EINVAL;
		goto err;
	}

	if (unlikely(!SCTP_SO(&asoc->stream, sinfo->sinfo_stream)->ext)) {
		err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream);
		if (err)
			goto err;
	}

	if (sp->disable_fragments && msg_len > asoc->frag_point) {
		err = -EMSGSIZE;
		goto err;
	}

	if (asoc->pmtu_pending) {
		if (sp->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
		asoc->pmtu_pending = 0;
	}

	if (sctp_wspace(asoc) < (int)msg_len)
		sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));

	if (sctp_wspace(asoc) <= 0) {
		timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
		if (err)
			goto err;
	}

	if (sctp_state(asoc, CLOSED)) {
		err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
		if (err)
			goto err;

		if (sp->strm_interleave) {
			timeo = sock_sndtimeo(sk, 0);
			err = sctp_wait_for_connect(asoc, &timeo);
			if (err) {
				err = -ESRCH;
				goto err;
			}
		} else {
			wait_connect = true;
		}

		pr_debug("%s: we associated primitively\n", __func__);
	}

	datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
	if (IS_ERR(datamsg)) {
		err = PTR_ERR(datamsg);
		goto err;
	}

	asoc->force_delay = !!(msg->msg_flags & MSG_MORE);

	list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
		sctp_chunk_hold(chunk);
		sctp_set_owner_w(chunk);
		chunk->transport = transport;
	}

	err = sctp_primitive_SEND(net, asoc, datamsg);
	if (err) {
		sctp_datamsg_free(datamsg);
		goto err;
	}

	pr_debug("%s: we sent primitively\n", __func__);

	sctp_datamsg_put(datamsg);

	if (unlikely(wait_connect)) {
		timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
		sctp_wait_for_connect(asoc, &timeo);
	}

	err = msg_len;

err:
	return err;
}
static union sctp_addr *sctp_sendmsg_get_daddr(struct sock *sk,
					       const struct msghdr *msg,
					       struct sctp_cmsgs *cmsgs)
{
	union sctp_addr *daddr = NULL;
	int err;

	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int len = msg->msg_namelen;

		if (len > sizeof(*daddr))
			len = sizeof(*daddr);

		daddr = (union sctp_addr *)msg->msg_name;

		err = sctp_verify_addr(sk, daddr, len);
		if (err)
			return ERR_PTR(err);
	}

	return daddr;
}
static void sctp_sendmsg_update_sinfo(struct sctp_association *asoc,
				      struct sctp_sndrcvinfo *sinfo,
				      struct sctp_cmsgs *cmsgs)
{
	if (!cmsgs->srinfo && !cmsgs->sinfo) {
		sinfo->sinfo_stream = asoc->default_stream;
		sinfo->sinfo_ppid = asoc->default_ppid;
		sinfo->sinfo_context = asoc->default_context;
		sinfo->sinfo_assoc_id = sctp_assoc2id(asoc);

		if (!cmsgs->prinfo)
			sinfo->sinfo_flags = asoc->default_flags;
	}

	if (!cmsgs->srinfo && !cmsgs->prinfo)
		sinfo->sinfo_timetolive = asoc->default_timetolive;

	if (cmsgs->authinfo) {
		/* Reuse sinfo_tsn to indicate that authinfo was set and
		 * sinfo_ssn to save the keyid on tx path.
		 */
		sinfo->sinfo_tsn = 1;
		sinfo->sinfo_ssn = cmsgs->authinfo->auth_keynumber;
	}
}
static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_transport *transport = NULL;
	struct sctp_sndrcvinfo _sinfo, *sinfo;
	struct sctp_association *asoc, *tmp;
	struct sctp_cmsgs cmsgs;
	union sctp_addr *daddr;
	bool new = false;
	__u16 sflags;
	int err;

	/* Parse and get snd_info */
	err = sctp_sendmsg_parse(sk, &cmsgs, &_sinfo, msg, msg_len);
	if (err)
		goto out;

	sinfo = &_sinfo;
	sflags = sinfo->sinfo_flags;

	/* Get daddr from msg */
	daddr = sctp_sendmsg_get_daddr(sk, msg, &cmsgs);
	if (IS_ERR(daddr)) {
		err = PTR_ERR(daddr);
		goto out;
	}

	lock_sock(sk);

	/* SCTP_SENDALL process */
	if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
		list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
			err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
							msg_len);
			if (err == 0)
				continue;
			if (err < 0)
				goto out_unlock;

			sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);

			err = sctp_sendmsg_to_asoc(asoc, msg, msg_len,
						   NULL, sinfo);
			if (err < 0)
				goto out_unlock;

			iov_iter_revert(&msg->msg_iter, err);
		}

		goto out_unlock;
	}

	/* Get and check or create asoc */
	if (daddr) {
		asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
		if (asoc) {
			err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
							msg_len);
			if (err <= 0)
				goto out_unlock;
		} else {
			err = sctp_sendmsg_new_asoc(sk, sflags, &cmsgs, daddr,
						    &transport);
			if (err)
				goto out_unlock;

			asoc = transport->asoc;
			new = true;
		}

		if (!sctp_style(sk, TCP) && !(sflags & SCTP_ADDR_OVER))
			transport = NULL;
	} else {
		asoc = sctp_id2assoc(sk, sinfo->sinfo_assoc_id);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}

		err = sctp_sendmsg_check_sflags(asoc, sflags, msg, msg_len);
		if (err <= 0)
			goto out_unlock;
	}

	/* Update snd_info with the asoc */
	sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);

	/* Send msg to the asoc */
	err = sctp_sendmsg_to_asoc(asoc, msg, msg_len, transport, sinfo);
	if (err < 0 && err != -ESRCH && new)
		sctp_association_free(asoc);

out_unlock:
	release_sock(sk);
out:
	return sctp_error(sk, msg->msg_flags, err);
}
/* This is an extended version of skb_pull() that removes the data from the
 * start of a skb even when data is spread across the list of skb's in the
 * frag_list. len specifies the total amount of data that needs to be removed.
 * when 'len' bytes could be removed from the skb, it returns 0.
 * If 'len' exceeds the total skb length, it returns the no. of bytes that
 * could not be removed.
 */
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
	struct sk_buff *list;
	int skb_len = skb_headlen(skb);
	int rlen;

	if (len <= skb_len) {
		__skb_pull(skb, len);
		return 0;
	}
	len -= skb_len;
	__skb_pull(skb, skb_len);

	skb_walk_frags(skb, list) {
		rlen = sctp_skb_pull(list, len);
		skb->len -= (len-rlen);
		skb->data_len -= (len-rlen);

		if (!rlen)
			return 0;

		len = rlen;
	}

	return len;
}
/* API 3.1.3  recvmsg() - UDP Style Syntax
 *
 *  ssize_t recvmsg(int socket, struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 */
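/* Illustrative userspace sketch (not part of this file): receiving one
 * message with sctp_recvmsg() from libsctp, which wraps the recvmsg()
 * call documented above and unpacks the SCTP_SNDRCV ancillary data.
 *
 *	char buf[2048];
 *	struct sockaddr_in from;
 *	socklen_t fromlen = sizeof(from);
 *	struct sctp_sndrcvinfo sinfo;
 *	int msg_flags = 0;
 *	ssize_t n;
 *
 *	n = sctp_recvmsg(sd, buf, sizeof(buf),
 *			 (struct sockaddr *)&from, &fromlen,
 *			 &sinfo, &msg_flags);
 *	if (n > 0 && (msg_flags & MSG_NOTIFICATION))
 *		;	// buf holds a notification, not user data
 */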
static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			int noblock, int flags, int *addr_len)
{
	struct sctp_ulpevent *event = NULL;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff *skb, *head_skb;
	int copied;
	int err = 0;
	int skb_len;

	pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
		 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
		 addr_len);

	lock_sock(sk);

	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
	    !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
		err = -ENOTCONN;
		goto out;
	}

	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Get the total length of the skb including any skb's in the
	 * frag_list.
	 */
	skb_len = skb->len;

	copied = skb_len;
	if (copied > len)
		copied = len;

	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	event = sctp_skb2event(skb);

	if (err)
		goto out_free;

	if (event->chunk && event->chunk->head_skb)
		head_skb = event->chunk->head_skb;
	else
		head_skb = skb;
	sock_recv_ts_and_drops(msg, sk, head_skb);
	if (sctp_ulpevent_is_notification(event)) {
		msg->msg_flags |= MSG_NOTIFICATION;
		sp->pf->event_msgname(event, msg->msg_name, addr_len);
	} else {
		sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
	}

	/* Check if we allow SCTP_NXTINFO. */
	if (sp->recvnxtinfo)
		sctp_ulpevent_read_nxtinfo(event, msg, sk);
	/* Check if we allow SCTP_RCVINFO. */
	if (sp->recvrcvinfo)
		sctp_ulpevent_read_rcvinfo(event, msg);
	/* Check if we allow SCTP_SNDRCVINFO. */
	if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_DATA_IO_EVENT))
		sctp_ulpevent_read_sndrcvinfo(event, msg);

	err = copied;

	/* If skb's length exceeds the user's buffer, update the skb and
	 * push it back to the receive_queue so that the next call to
	 * recvmsg() will return the remaining data. Don't set MSG_EOR.
	 */
	if (skb_len > copied) {
		msg->msg_flags &= ~MSG_EOR;
		if (flags & MSG_PEEK)
			goto out_free;
		sctp_skb_pull(skb, copied);
		skb_queue_head(&sk->sk_receive_queue, skb);

		/* When only partial message is copied to the user, increase
		 * rwnd by that amount. If all the data in the skb is read,
		 * rwnd is updated when the event is freed.
		 */
		if (!sctp_ulpevent_is_notification(event))
			sctp_assoc_rwnd_increase(event->asoc, copied);
		goto out;
	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
		   (event->msg_flags & MSG_EOR))
		msg->msg_flags |= MSG_EOR;
	else
		msg->msg_flags &= ~MSG_EOR;

out_free:
	if (flags & MSG_PEEK) {
		/* Release the skb reference acquired after peeking the skb in
		 * sctp_skb_recv_datagram().
		 */
		kfree_skb(skb);
	} else {
		/* Free the event which includes releasing the reference to
		 * the owner of the skb, freeing the skb and updating the
		 * rwnd.
		 */
		sctp_ulpevent_free(event);
	}
out:
	release_sock(sk);
	return err;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag. If enabled, no SCTP message
 * fragmentation will be performed. Instead, if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * an error will be indicated to the user.
 */
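/* Illustrative userspace sketch (not part of this file): one plausible way
 * to set this option with setsockopt(), assuming the lksctp-tools
 * <netinet/sctp.h> header and an already-created SCTP socket "fd":
 *
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int val = 1;	// non-zero: never fragment, fail oversized sends
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *		       &val, sizeof(val)) < 0)
 *		perror("SCTP_DISABLE_FRAGMENTS");
 */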
static int sctp_setsockopt_disable_fragments(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;

	return 0;
}
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
				  unsigned int optlen)
{
	struct sctp_event_subscribe subscribe;
	__u8 *sn_type = (__u8 *)&subscribe;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	int i;

	if (optlen > sizeof(struct sctp_event_subscribe))
		return -EINVAL;

	if (copy_from_user(&subscribe, optval, optlen))
		return -EFAULT;

	for (i = 0; i < optlen; i++)
		sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i,
				       sn_type[i]);

	list_for_each_entry(asoc, &sp->ep->asocs, asocs)
		asoc->subscribe = sctp_sk(sk)->subscribe;

	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
	 * if there is no data to be sent or retransmit, the stack will
	 * immediately send up this notification.
	 */
	if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_SENDER_DRY_EVENT)) {
		struct sctp_ulpevent *event;

		asoc = sctp_id2assoc(sk, 0);
		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
			event = sctp_ulpevent_make_sender_dry_event(asoc,
					GFP_USER | __GFP_NOWARN);
			if (!event)
				return -ENOMEM;

			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
		}
	}

	return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only. When
 * set, it will cause associations that are idle for more than the
 * specified number of seconds to automatically close. An association
 * being idle is defined as an association that has NOT sent or received
 * user data. The special value of '0' indicates that no automatic
 * close of any association should be performed. The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
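/* Illustrative userspace sketch (not part of this file): requesting that
 * idle associations on a one-to-many socket "fd" close after 120 seconds,
 * assuming the lksctp-tools <netinet/sctp.h> header:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int seconds = 120;	// 0 would disable automatic close
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
 *		       &seconds, sizeof(seconds)) < 0)
 *		perror("SCTP_AUTOCLOSE");
 */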
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
				     unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct net *net = sock_net(sk);

	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (optlen != sizeof(int))
		return -EINVAL;
	if (copy_from_user(&sp->autoclose, optval, optlen))
		return -EFAULT;

	if (sp->autoclose > net->sctp.max_autoclose)
		sp->autoclose = net->sctp.max_autoclose;

	return 0;
}
2379 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
2381 * Applications can enable or disable heartbeats for any peer address of
2382 * an association, modify an address's heartbeat interval, force a
2383 * heartbeat to be sent immediately, and adjust the address's maximum
2384 * number of retransmissions sent before an address is considered
2385 * unreachable. The following structure is used to access and modify an
2386 * address's parameters:
2388 * struct sctp_paddrparams {
2389 * sctp_assoc_t spp_assoc_id;
2390 * struct sockaddr_storage spp_address;
2391 * uint32_t spp_hbinterval;
2392 * uint16_t spp_pathmaxrxt;
2393 * uint32_t spp_pathmtu;
2394 * uint32_t spp_sackdelay;
2395 * uint32_t spp_flags;
2396 * uint32_t spp_ipv6_flowlabel;
2400 * spp_assoc_id - (one-to-many style socket) This is filled in the
2401 * application, and identifies the association for
2403 * spp_address - This specifies which address is of interest.
2404 * spp_hbinterval - This contains the value of the heartbeat interval,
2405 * in milliseconds. If a value of zero
2406 * is present in this field then no changes are to
2407 * be made to this parameter.
2408 * spp_pathmaxrxt - This contains the maximum number of
2409 * retransmissions before this address shall be
2410 * considered unreachable. If a value of zero
2411 * is present in this field then no changes are to
2412 * be made to this parameter.
2413 * spp_pathmtu - When Path MTU discovery is disabled the value
2414 * specified here will be the "fixed" path mtu.
2415 * Note that if the spp_address field is empty
2416 * then all associations on this address will
2417 * have this fixed path mtu set upon them.
2419 * spp_sackdelay - When delayed sack is enabled, this value specifies
2420 * the number of milliseconds that sacks will be delayed
2421 * for. This value will apply to all addresses of an
2422 * association if the spp_address field is empty. Note
2423 * also, that if delayed sack is enabled and this
2424 * value is set to 0, no change is made to the last
2425 * recorded delayed sack timer value.
2427 * spp_flags - These flags are used to control various features
2428 * on an association. The flag field may contain
2429 * zero or more of the following options.
2431 * SPP_HB_ENABLE - Enable heartbeats on the
2432 * specified address. Note that if the address
2433 * field is empty all addresses for the association
2434 * have heartbeats enabled upon them.
 * SPP_HB_DISABLE - Disable heartbeats on the
 * specified address. Note that if the address
 * field is empty all addresses for the association
 * will have their heartbeats disabled. Note also
 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
 * mutually exclusive, only one of these two should
 * be specified. Enabling both fields will have
 * undetermined results.
2445 * SPP_HB_DEMAND - Request a user initiated heartbeat
2446 * to be made immediately.
 * SPP_HB_TIME_IS_ZERO - Specifies that the time for
 * heartbeat delay is to be set to the value of 0
 * milliseconds.
 * SPP_PMTUD_ENABLE - This field will enable PMTU
 * discovery upon the specified address. Note that
 * if the address field is empty then all addresses
 * on the association are affected.
 *
 * SPP_PMTUD_DISABLE - This field will disable PMTU
 * discovery upon the specified address. Note that
 * if the address field is empty then all addresses
 * on the association are affected. Note also that
 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 * exclusive. Enabling both will have undetermined
 * results.
2465 * SPP_SACKDELAY_ENABLE - Setting this flag turns
2466 * on delayed sack. The time specified in spp_sackdelay
2467 * is used to specify the sack delay for this address. Note
2468 * that if spp_address is empty then all addresses will
2469 * enable delayed sack and take on the sack delay
2470 * value specified in spp_sackdelay.
2471 * SPP_SACKDELAY_DISABLE - Setting this flag turns
2472 * off delayed sack. If the spp_address field is blank then
2473 * delayed sack is disabled for the entire association. Note
2474 * also that this field is mutually exclusive to
2475 * SPP_SACKDELAY_ENABLE, setting both will have undefined
2478 * SPP_IPV6_FLOWLABEL: Setting this flag enables the
2479 * setting of the IPV6 flow label value. The value is
2480 * contained in the spp_ipv6_flowlabel field.
2481 * Upon retrieval, this flag will be set to indicate that
2482 * the spp_ipv6_flowlabel field has a valid value returned.
2483 * If a specific destination address is set (in the
2484 * spp_address field), then the value returned is that of
2485 * the address. If just an association is specified (and
2486 * no address), then the association's default flow label
2487 * is returned. If neither an association nor a destination
2488 * is specified, then the socket's default flow label is
2489 * returned. For non-IPv6 sockets, this flag will be left
2492 * SPP_DSCP: Setting this flag enables the setting of the
2493 * Differentiated Services Code Point (DSCP) value
2494 * associated with either the association or a specific
2495 * address. The value is obtained in the spp_dscp field.
2496 * Upon retrieval, this flag will be set to indicate that
2497 * the spp_dscp field has a valid value returned. If a
2498 * specific destination address is set when called (in the
2499 * spp_address field), then that specific destination
2500 * address's DSCP value is returned. If just an association
2501 * is specified, then the association's default DSCP is
2502 * returned. If neither an association nor a destination is
2503 * specified, then the socket's default DSCP is returned.
2505 * spp_ipv6_flowlabel
2506 * - This field is used in conjunction with the
2507 * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
2508 * The 20 least significant bits are used for the flow
2509 * label. This setting has precedence over any IPv6-layer
2512 * spp_dscp - This field is used in conjunction with the SPP_DSCP flag
2513 * and contains the DSCP. The 6 most significant bits are
2514 * used for the DSCP. This setting has precedence over any
2515 * IPv4- or IPv6- layer setting.
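 */
/* Illustrative userspace sketch (not part of this file): enabling heartbeats
 * with a 5 s interval on all peer addresses of an association, assuming the
 * lksctp-tools <netinet/sctp.h> header, a one-to-many socket "fd" and a
 * known association id "assoc_id":
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_paddrparams spp;
 *	memset(&spp, 0, sizeof(spp));
 *	spp.spp_assoc_id   = assoc_id;
 *	spp.spp_hbinterval = 5000;		// milliseconds
 *	spp.spp_flags      = SPP_HB_ENABLE;	// spp_address empty: all paths
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		       &spp, sizeof(spp)) < 0)
 *		perror("SCTP_PEER_ADDR_PARAMS");
 */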
2517 static int sctp_apply_peer_addr_params(struct sctp_paddrparams
*params
,
2518 struct sctp_transport
*trans
,
2519 struct sctp_association
*asoc
,
2520 struct sctp_sock
*sp
,
2523 int sackdelay_change
)
2527 if (params
->spp_flags
& SPP_HB_DEMAND
&& trans
) {
2528 struct net
*net
= sock_net(trans
->asoc
->base
.sk
);
2530 error
= sctp_primitive_REQUESTHEARTBEAT(net
, trans
->asoc
, trans
);
2535 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
2536 * this field is ignored. Note also that a value of zero indicates
2537 * the current setting should be left unchanged.
2539 if (params
->spp_flags
& SPP_HB_ENABLE
) {
2541 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
2542 * set. This lets us use 0 value when this flag
2545 if (params
->spp_flags
& SPP_HB_TIME_IS_ZERO
)
2546 params
->spp_hbinterval
= 0;
2548 if (params
->spp_hbinterval
||
2549 (params
->spp_flags
& SPP_HB_TIME_IS_ZERO
)) {
2552 msecs_to_jiffies(params
->spp_hbinterval
);
2555 msecs_to_jiffies(params
->spp_hbinterval
);
2557 sp
->hbinterval
= params
->spp_hbinterval
;
2564 trans
->param_flags
=
2565 (trans
->param_flags
& ~SPP_HB
) | hb_change
;
2568 (asoc
->param_flags
& ~SPP_HB
) | hb_change
;
2571 (sp
->param_flags
& ~SPP_HB
) | hb_change
;
2575 /* When Path MTU discovery is disabled the value specified here will
2576 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
2577 * include the flag SPP_PMTUD_DISABLE for this field to have any
2580 if ((params
->spp_flags
& SPP_PMTUD_DISABLE
) && params
->spp_pathmtu
) {
2582 trans
->pathmtu
= params
->spp_pathmtu
;
2583 sctp_assoc_sync_pmtu(asoc
);
2585 sctp_assoc_set_pmtu(asoc
, params
->spp_pathmtu
);
2587 sp
->pathmtu
= params
->spp_pathmtu
;
2593 int update
= (trans
->param_flags
& SPP_PMTUD_DISABLE
) &&
2594 (params
->spp_flags
& SPP_PMTUD_ENABLE
);
2595 trans
->param_flags
=
2596 (trans
->param_flags
& ~SPP_PMTUD
) | pmtud_change
;
2598 sctp_transport_pmtu(trans
, sctp_opt2sk(sp
));
2599 sctp_assoc_sync_pmtu(asoc
);
2603 (asoc
->param_flags
& ~SPP_PMTUD
) | pmtud_change
;
2606 (sp
->param_flags
& ~SPP_PMTUD
) | pmtud_change
;
2610 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
2611 * value of this field is ignored. Note also that a value of zero
2612 * indicates the current setting should be left unchanged.
2614 if ((params
->spp_flags
& SPP_SACKDELAY_ENABLE
) && params
->spp_sackdelay
) {
2617 msecs_to_jiffies(params
->spp_sackdelay
);
2620 msecs_to_jiffies(params
->spp_sackdelay
);
2622 sp
->sackdelay
= params
->spp_sackdelay
;
2626 if (sackdelay_change
) {
2628 trans
->param_flags
=
2629 (trans
->param_flags
& ~SPP_SACKDELAY
) |
2633 (asoc
->param_flags
& ~SPP_SACKDELAY
) |
2637 (sp
->param_flags
& ~SPP_SACKDELAY
) |
2642 /* Note that a value of zero indicates the current setting should be
2645 if (params
->spp_pathmaxrxt
) {
2647 trans
->pathmaxrxt
= params
->spp_pathmaxrxt
;
2649 asoc
->pathmaxrxt
= params
->spp_pathmaxrxt
;
2651 sp
->pathmaxrxt
= params
->spp_pathmaxrxt
;
2655 if (params
->spp_flags
& SPP_IPV6_FLOWLABEL
) {
2657 if (trans
->ipaddr
.sa
.sa_family
== AF_INET6
) {
2658 trans
->flowlabel
= params
->spp_ipv6_flowlabel
&
2659 SCTP_FLOWLABEL_VAL_MASK
;
2660 trans
->flowlabel
|= SCTP_FLOWLABEL_SET_MASK
;
2663 struct sctp_transport
*t
;
2665 list_for_each_entry(t
, &asoc
->peer
.transport_addr_list
,
2667 if (t
->ipaddr
.sa
.sa_family
!= AF_INET6
)
2669 t
->flowlabel
= params
->spp_ipv6_flowlabel
&
2670 SCTP_FLOWLABEL_VAL_MASK
;
2671 t
->flowlabel
|= SCTP_FLOWLABEL_SET_MASK
;
2673 asoc
->flowlabel
= params
->spp_ipv6_flowlabel
&
2674 SCTP_FLOWLABEL_VAL_MASK
;
2675 asoc
->flowlabel
|= SCTP_FLOWLABEL_SET_MASK
;
2676 } else if (sctp_opt2sk(sp
)->sk_family
== AF_INET6
) {
2677 sp
->flowlabel
= params
->spp_ipv6_flowlabel
&
2678 SCTP_FLOWLABEL_VAL_MASK
;
2679 sp
->flowlabel
|= SCTP_FLOWLABEL_SET_MASK
;
2683 if (params
->spp_flags
& SPP_DSCP
) {
2685 trans
->dscp
= params
->spp_dscp
& SCTP_DSCP_VAL_MASK
;
2686 trans
->dscp
|= SCTP_DSCP_SET_MASK
;
2688 struct sctp_transport
*t
;
2690 list_for_each_entry(t
, &asoc
->peer
.transport_addr_list
,
2692 t
->dscp
= params
->spp_dscp
&
2694 t
->dscp
|= SCTP_DSCP_SET_MASK
;
2696 asoc
->dscp
= params
->spp_dscp
& SCTP_DSCP_VAL_MASK
;
2697 asoc
->dscp
|= SCTP_DSCP_SET_MASK
;
2699 sp
->dscp
= params
->spp_dscp
& SCTP_DSCP_VAL_MASK
;
2700 sp
->dscp
|= SCTP_DSCP_SET_MASK
;
2707 static int sctp_setsockopt_peer_addr_params(struct sock
*sk
,
2708 char __user
*optval
,
2709 unsigned int optlen
)
2711 struct sctp_paddrparams params
;
2712 struct sctp_transport
*trans
= NULL
;
2713 struct sctp_association
*asoc
= NULL
;
2714 struct sctp_sock
*sp
= sctp_sk(sk
);
2716 int hb_change
, pmtud_change
, sackdelay_change
;
2718 if (optlen
== sizeof(params
)) {
2719 if (copy_from_user(¶ms
, optval
, optlen
))
2721 } else if (optlen
== ALIGN(offsetof(struct sctp_paddrparams
,
2722 spp_ipv6_flowlabel
), 4)) {
2723 if (copy_from_user(¶ms
, optval
, optlen
))
2725 if (params
.spp_flags
& (SPP_DSCP
| SPP_IPV6_FLOWLABEL
))
2731 /* Validate flags and value parameters. */
2732 hb_change
= params
.spp_flags
& SPP_HB
;
2733 pmtud_change
= params
.spp_flags
& SPP_PMTUD
;
2734 sackdelay_change
= params
.spp_flags
& SPP_SACKDELAY
;
2736 if (hb_change
== SPP_HB
||
2737 pmtud_change
== SPP_PMTUD
||
2738 sackdelay_change
== SPP_SACKDELAY
||
2739 params
.spp_sackdelay
> 500 ||
2740 (params
.spp_pathmtu
&&
2741 params
.spp_pathmtu
< SCTP_DEFAULT_MINSEGMENT
))
2744 /* If an address other than INADDR_ANY is specified, and
2745 * no transport is found, then the request is invalid.
2747 if (!sctp_is_any(sk
, (union sctp_addr
*)¶ms
.spp_address
)) {
2748 trans
= sctp_addr_id2transport(sk
, ¶ms
.spp_address
,
2749 params
.spp_assoc_id
);
2754 /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
2755 * socket is a one to many style socket, and an association
2756 * was not found, then the id was invalid.
2758 asoc
= sctp_id2assoc(sk
, params
.spp_assoc_id
);
2759 if (!asoc
&& params
.spp_assoc_id
!= SCTP_FUTURE_ASSOC
&&
2760 sctp_style(sk
, UDP
))
2763 /* Heartbeat demand can only be sent on a transport or
2764 * association, but not a socket.
2766 if (params
.spp_flags
& SPP_HB_DEMAND
&& !trans
&& !asoc
)
2769 /* Process parameters. */
2770 error
= sctp_apply_peer_addr_params(¶ms
, trans
, asoc
, sp
,
2771 hb_change
, pmtud_change
,
2777 /* If changes are for association, also apply parameters to each
2780 if (!trans
&& asoc
) {
2781 list_for_each_entry(trans
, &asoc
->peer
.transport_addr_list
,
2783 sctp_apply_peer_addr_params(¶ms
, trans
, asoc
, sp
,
2784 hb_change
, pmtud_change
,
static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
}

static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
}
2802 static void sctp_apply_asoc_delayed_ack(struct sctp_sack_info
*params
,
2803 struct sctp_association
*asoc
)
2805 struct sctp_transport
*trans
;
2807 if (params
->sack_delay
) {
2808 asoc
->sackdelay
= msecs_to_jiffies(params
->sack_delay
);
2810 sctp_spp_sackdelay_enable(asoc
->param_flags
);
2812 if (params
->sack_freq
== 1) {
2814 sctp_spp_sackdelay_disable(asoc
->param_flags
);
2815 } else if (params
->sack_freq
> 1) {
2816 asoc
->sackfreq
= params
->sack_freq
;
2818 sctp_spp_sackdelay_enable(asoc
->param_flags
);
2821 list_for_each_entry(trans
, &asoc
->peer
.transport_addr_list
,
2823 if (params
->sack_delay
) {
2824 trans
->sackdelay
= msecs_to_jiffies(params
->sack_delay
);
2825 trans
->param_flags
=
2826 sctp_spp_sackdelay_enable(trans
->param_flags
);
2828 if (params
->sack_freq
== 1) {
2829 trans
->param_flags
=
2830 sctp_spp_sackdelay_disable(trans
->param_flags
);
2831 } else if (params
->sack_freq
> 1) {
2832 trans
->sackfreq
= params
->sack_freq
;
2833 trans
->param_flags
=
2834 sctp_spp_sackdelay_enable(trans
->param_flags
);
2840 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
 * This option will affect the way delayed acks are performed. This
2843 * option allows you to get or set the delayed ack time, in
2844 * milliseconds. It also allows changing the delayed ack frequency.
2845 * Changing the frequency to 1 disables the delayed sack algorithm. If
 * the assoc_id is 0, then this sets or gets the endpoint's default
 * values. If the assoc_id field is non-zero, then the set or get
 * affects the specified association for the one to many model (the
2849 * assoc_id field is ignored by the one to one model). Note that if
2850 * sack_delay or sack_freq are 0 when setting this option, then the
2851 * current values will remain unchanged.
2853 * struct sctp_sack_info {
2854 * sctp_assoc_t sack_assoc_id;
2855 * uint32_t sack_delay;
2856 * uint32_t sack_freq;
 * sack_assoc_id - This parameter indicates which association the user
 * is performing an action upon. Note that if this field's value is
 * zero then the endpoint's default value is changed (affecting future
 * associations only).
2864 * sack_delay - This parameter contains the number of milliseconds that
2865 * the user is requesting the delayed ACK timer be set to. Note that
2866 * this value is defined in the standard to be between 200 and 500
2869 * sack_freq - This parameter contains the number of packets that must
2870 * be received before a sack is sent without waiting for the delay
2871 * timer to expire. The default value for this is 2, setting this
2872 * value to 1 will disable the delayed sack algorithm.
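 */
/* Illustrative userspace sketch (not part of this file): setting a 100 ms
 * delayed SACK timer and a SACK frequency of 2 packets for the endpoint
 * defaults, assuming the lksctp-tools <netinet/sctp.h> header:
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_sack_info si;
 *	memset(&si, 0, sizeof(si));
 *	si.sack_assoc_id = 0;	// endpoint default (future associations)
 *	si.sack_delay    = 100;	// milliseconds, must be <= 500
 *	si.sack_freq     = 2;	// 1 would disable delayed SACK
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *		       &si, sizeof(si)) < 0)
 *		perror("SCTP_DELAYED_SACK");
 */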
2875 static int sctp_setsockopt_delayed_ack(struct sock
*sk
,
2876 char __user
*optval
, unsigned int optlen
)
2878 struct sctp_sock
*sp
= sctp_sk(sk
);
2879 struct sctp_association
*asoc
;
2880 struct sctp_sack_info params
;
2882 if (optlen
== sizeof(struct sctp_sack_info
)) {
2883 if (copy_from_user(¶ms
, optval
, optlen
))
2886 if (params
.sack_delay
== 0 && params
.sack_freq
== 0)
2888 } else if (optlen
== sizeof(struct sctp_assoc_value
)) {
2889 pr_warn_ratelimited(DEPRECATED
2891 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
2892 "Use struct sctp_sack_info instead\n",
2893 current
->comm
, task_pid_nr(current
));
2894 if (copy_from_user(¶ms
, optval
, optlen
))
2897 if (params
.sack_delay
== 0)
2898 params
.sack_freq
= 1;
2900 params
.sack_freq
= 0;
2904 /* Validate value parameter. */
2905 if (params
.sack_delay
> 500)
2908 /* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
2909 * socket is a one to many style socket, and an association
2910 * was not found, then the id was invalid.
2912 asoc
= sctp_id2assoc(sk
, params
.sack_assoc_id
);
2913 if (!asoc
&& params
.sack_assoc_id
> SCTP_ALL_ASSOC
&&
2914 sctp_style(sk
, UDP
))
2918 sctp_apply_asoc_delayed_ack(¶ms
, asoc
);
2923 if (params
.sack_assoc_id
== SCTP_FUTURE_ASSOC
||
2924 params
.sack_assoc_id
== SCTP_ALL_ASSOC
) {
2925 if (params
.sack_delay
) {
2926 sp
->sackdelay
= params
.sack_delay
;
2928 sctp_spp_sackdelay_enable(sp
->param_flags
);
2930 if (params
.sack_freq
== 1) {
2932 sctp_spp_sackdelay_disable(sp
->param_flags
);
2933 } else if (params
.sack_freq
> 1) {
2934 sp
->sackfreq
= params
.sack_freq
;
2936 sctp_spp_sackdelay_enable(sp
->param_flags
);
2940 if (params
.sack_assoc_id
== SCTP_CURRENT_ASSOC
||
2941 params
.sack_assoc_id
== SCTP_ALL_ASSOC
)
2942 list_for_each_entry(asoc
, &sp
->ep
->asocs
, asocs
)
2943 sctp_apply_asoc_delayed_ack(¶ms
, asoc
);
2948 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
2950 * Applications can specify protocol parameters for the default association
2951 * initialization. The option name argument to setsockopt() and getsockopt()
2954 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are affected
2956 * by the change). With TCP-style sockets, this option is inherited by
2957 * sockets derived from a listener socket.
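 */
/* Illustrative userspace sketch (not part of this file): requesting 10
 * outbound streams and accepting up to 10 inbound streams on future
 * associations, assuming the lksctp-tools <netinet/sctp.h> header:
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_initmsg init;
 *	memset(&init, 0, sizeof(init));
 *	init.sinit_num_ostreams  = 10;
 *	init.sinit_max_instreams = 10;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG,
 *		       &init, sizeof(init)) < 0)
 *		perror("SCTP_INITMSG");
 */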
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	struct sctp_initmsg sinit;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof(struct sctp_initmsg))
		return -EINVAL;
	if (copy_from_user(&sinit, optval, optlen))
		return -EFAULT;

	if (sinit.sinit_num_ostreams)
		sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
	if (sinit.sinit_max_instreams)
		sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
	if (sinit.sinit_max_attempts)
		sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
	if (sinit.sinit_max_init_timeo)
		sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;

	return 0;
}
2982 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
2984 * Applications that wish to use the sendto() system call may wish to
2985 * specify a default set of parameters that would normally be supplied
2986 * through the inclusion of ancillary data. This socket option allows
2987 * such an application to set the default sctp_sndrcvinfo structure.
2988 * The application that wishes to use this socket option simply passes
2989 * in to this call the sctp_sndrcvinfo structure defined in Section
2990 * 5.2.2) The input parameters accepted by this call include
2991 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
2992 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
2993 * to this call if the caller is using the UDP model.
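 */
/* Illustrative userspace sketch (not part of this file): making stream 1 and
 * unordered delivery the defaults for future sends on socket "fd", assuming
 * the lksctp-tools <netinet/sctp.h> header:
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_sndrcvinfo sinfo;
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	sinfo.sinfo_stream = 1;
 *	sinfo.sinfo_flags  = SCTP_UNORDERED;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		       &sinfo, sizeof(sinfo)) < 0)
 *		perror("SCTP_DEFAULT_SEND_PARAM");
 */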
2995 static int sctp_setsockopt_default_send_param(struct sock
*sk
,
2996 char __user
*optval
,
2997 unsigned int optlen
)
2999 struct sctp_sock
*sp
= sctp_sk(sk
);
3000 struct sctp_association
*asoc
;
3001 struct sctp_sndrcvinfo info
;
3003 if (optlen
!= sizeof(info
))
3005 if (copy_from_user(&info
, optval
, optlen
))
3007 if (info
.sinfo_flags
&
3008 ~(SCTP_UNORDERED
| SCTP_ADDR_OVER
|
3009 SCTP_ABORT
| SCTP_EOF
))
3012 asoc
= sctp_id2assoc(sk
, info
.sinfo_assoc_id
);
3013 if (!asoc
&& info
.sinfo_assoc_id
> SCTP_ALL_ASSOC
&&
3014 sctp_style(sk
, UDP
))
3018 asoc
->default_stream
= info
.sinfo_stream
;
3019 asoc
->default_flags
= info
.sinfo_flags
;
3020 asoc
->default_ppid
= info
.sinfo_ppid
;
3021 asoc
->default_context
= info
.sinfo_context
;
3022 asoc
->default_timetolive
= info
.sinfo_timetolive
;
3027 if (info
.sinfo_assoc_id
== SCTP_FUTURE_ASSOC
||
3028 info
.sinfo_assoc_id
== SCTP_ALL_ASSOC
) {
3029 sp
->default_stream
= info
.sinfo_stream
;
3030 sp
->default_flags
= info
.sinfo_flags
;
3031 sp
->default_ppid
= info
.sinfo_ppid
;
3032 sp
->default_context
= info
.sinfo_context
;
3033 sp
->default_timetolive
= info
.sinfo_timetolive
;
3036 if (info
.sinfo_assoc_id
== SCTP_CURRENT_ASSOC
||
3037 info
.sinfo_assoc_id
== SCTP_ALL_ASSOC
) {
3038 list_for_each_entry(asoc
, &sp
->ep
->asocs
, asocs
) {
3039 asoc
->default_stream
= info
.sinfo_stream
;
3040 asoc
->default_flags
= info
.sinfo_flags
;
3041 asoc
->default_ppid
= info
.sinfo_ppid
;
3042 asoc
->default_context
= info
.sinfo_context
;
3043 asoc
->default_timetolive
= info
.sinfo_timetolive
;
3050 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
3051 * (SCTP_DEFAULT_SNDINFO)
3053 static int sctp_setsockopt_default_sndinfo(struct sock
*sk
,
3054 char __user
*optval
,
3055 unsigned int optlen
)
3057 struct sctp_sock
*sp
= sctp_sk(sk
);
3058 struct sctp_association
*asoc
;
3059 struct sctp_sndinfo info
;
3061 if (optlen
!= sizeof(info
))
3063 if (copy_from_user(&info
, optval
, optlen
))
3065 if (info
.snd_flags
&
3066 ~(SCTP_UNORDERED
| SCTP_ADDR_OVER
|
3067 SCTP_ABORT
| SCTP_EOF
))
3070 asoc
= sctp_id2assoc(sk
, info
.snd_assoc_id
);
3071 if (!asoc
&& info
.snd_assoc_id
> SCTP_ALL_ASSOC
&&
3072 sctp_style(sk
, UDP
))
3076 asoc
->default_stream
= info
.snd_sid
;
3077 asoc
->default_flags
= info
.snd_flags
;
3078 asoc
->default_ppid
= info
.snd_ppid
;
3079 asoc
->default_context
= info
.snd_context
;
3084 if (info
.snd_assoc_id
== SCTP_FUTURE_ASSOC
||
3085 info
.snd_assoc_id
== SCTP_ALL_ASSOC
) {
3086 sp
->default_stream
= info
.snd_sid
;
3087 sp
->default_flags
= info
.snd_flags
;
3088 sp
->default_ppid
= info
.snd_ppid
;
3089 sp
->default_context
= info
.snd_context
;
3092 if (info
.snd_assoc_id
== SCTP_CURRENT_ASSOC
||
3093 info
.snd_assoc_id
== SCTP_ALL_ASSOC
) {
3094 list_for_each_entry(asoc
, &sp
->ep
->asocs
, asocs
) {
3095 asoc
->default_stream
= info
.snd_sid
;
3096 asoc
->default_flags
= info
.snd_flags
;
3097 asoc
->default_ppid
= info
.snd_ppid
;
3098 asoc
->default_context
= info
.snd_context
;
3105 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
3107 * Requests that the local SCTP stack use the enclosed peer address as
3108 * the association primary. The enclosed address must be one of the
3109 * association peer's addresses.
3111 static int sctp_setsockopt_primary_addr(struct sock
*sk
, char __user
*optval
,
3112 unsigned int optlen
)
3114 struct sctp_prim prim
;
3115 struct sctp_transport
*trans
;
3119 if (optlen
!= sizeof(struct sctp_prim
))
3122 if (copy_from_user(&prim
, optval
, sizeof(struct sctp_prim
)))
3125 /* Allow security module to validate address but need address len. */
3126 af
= sctp_get_af_specific(prim
.ssp_addr
.ss_family
);
3130 err
= security_sctp_bind_connect(sk
, SCTP_PRIMARY_ADDR
,
3131 (struct sockaddr
*)&prim
.ssp_addr
,
3136 trans
= sctp_addr_id2transport(sk
, &prim
.ssp_addr
, prim
.ssp_assoc_id
);
3140 sctp_assoc_set_primary(trans
->asoc
, trans
);
3146 * 7.1.5 SCTP_NODELAY
3148 * Turn on/off any Nagle-like algorithm. This means that packets are
3149 * generally sent as soon as possible and no unnecessary delays are
3150 * introduced, at the cost of more packets in the network. Expects an
3151 * integer boolean flag.
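 */
/* Illustrative userspace sketch (not part of this file): disabling the
 * Nagle-like delay on socket "fd", assuming the lksctp-tools
 * <netinet/sctp.h> header:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int on = 1;	// non-zero: send as soon as possible
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
 *		perror("SCTP_NODELAY");
 */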
static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
	return 0;
}
3169 * 7.1.1 SCTP_RTOINFO
3171 * The protocol parameters used to initialize and bound retransmission
3172 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
3173 * and modify these parameters.
3174 * All parameters are time values, in milliseconds. A value of 0, when
 * modifying the parameters, indicates that the current value should not
 * be changed.
 */
3179 static int sctp_setsockopt_rtoinfo(struct sock
*sk
, char __user
*optval
, unsigned int optlen
)
3181 struct sctp_rtoinfo rtoinfo
;
3182 struct sctp_association
*asoc
;
3183 unsigned long rto_min
, rto_max
;
3184 struct sctp_sock
*sp
= sctp_sk(sk
);
3186 if (optlen
!= sizeof (struct sctp_rtoinfo
))
3189 if (copy_from_user(&rtoinfo
, optval
, optlen
))
3192 asoc
= sctp_id2assoc(sk
, rtoinfo
.srto_assoc_id
);
3194 /* Set the values to the specific association */
3195 if (!asoc
&& rtoinfo
.srto_assoc_id
!= SCTP_FUTURE_ASSOC
&&
3196 sctp_style(sk
, UDP
))
3199 rto_max
= rtoinfo
.srto_max
;
3200 rto_min
= rtoinfo
.srto_min
;
3203 rto_max
= asoc
? msecs_to_jiffies(rto_max
) : rto_max
;
3205 rto_max
= asoc
? asoc
->rto_max
: sp
->rtoinfo
.srto_max
;
3208 rto_min
= asoc
? msecs_to_jiffies(rto_min
) : rto_min
;
3210 rto_min
= asoc
? asoc
->rto_min
: sp
->rtoinfo
.srto_min
;
3212 if (rto_min
> rto_max
)
3216 if (rtoinfo
.srto_initial
!= 0)
3218 msecs_to_jiffies(rtoinfo
.srto_initial
);
3219 asoc
->rto_max
= rto_max
;
3220 asoc
->rto_min
= rto_min
;
3222 /* If there is no association or the association-id = 0
3223 * set the values to the endpoint.
3225 if (rtoinfo
.srto_initial
!= 0)
3226 sp
->rtoinfo
.srto_initial
= rtoinfo
.srto_initial
;
3227 sp
->rtoinfo
.srto_max
= rto_max
;
3228 sp
->rtoinfo
.srto_min
= rto_min
;
3236 * 7.1.2 SCTP_ASSOCINFO
3238 * This option is used to tune the maximum retransmission attempts
3239 * of the association.
3240 * Returns an error if the new association retransmission value is
3241 * greater than the sum of the retransmission value of the peer.
3242 * See [SCTP] for more information.
3245 static int sctp_setsockopt_associnfo(struct sock
*sk
, char __user
*optval
, unsigned int optlen
)
3248 struct sctp_assocparams assocparams
;
3249 struct sctp_association
*asoc
;
3251 if (optlen
!= sizeof(struct sctp_assocparams
))
3253 if (copy_from_user(&assocparams
, optval
, optlen
))
3256 asoc
= sctp_id2assoc(sk
, assocparams
.sasoc_assoc_id
);
3258 if (!asoc
&& assocparams
.sasoc_assoc_id
!= SCTP_FUTURE_ASSOC
&&
3259 sctp_style(sk
, UDP
))
3262 /* Set the values to the specific association */
3264 if (assocparams
.sasoc_asocmaxrxt
!= 0) {
3267 struct sctp_transport
*peer_addr
;
3269 list_for_each_entry(peer_addr
, &asoc
->peer
.transport_addr_list
,
3271 path_sum
+= peer_addr
->pathmaxrxt
;
3275 /* Only validate asocmaxrxt if we have more than
3276 * one path/transport. We do this because path
3277 * retransmissions are only counted when we have more
3281 assocparams
.sasoc_asocmaxrxt
> path_sum
)
3284 asoc
->max_retrans
= assocparams
.sasoc_asocmaxrxt
;
3287 if (assocparams
.sasoc_cookie_life
!= 0)
3288 asoc
->cookie_life
= ms_to_ktime(assocparams
.sasoc_cookie_life
);
3290 /* Set the values to the endpoint */
3291 struct sctp_sock
*sp
= sctp_sk(sk
);
3293 if (assocparams
.sasoc_asocmaxrxt
!= 0)
3294 sp
->assocparams
.sasoc_asocmaxrxt
=
3295 assocparams
.sasoc_asocmaxrxt
;
3296 if (assocparams
.sasoc_cookie_life
!= 0)
3297 sp
->assocparams
.sasoc_cookie_life
=
3298 assocparams
.sasoc_cookie_life
;
3304 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
3306 * This socket option is a boolean flag which turns on or off mapped V4
3307 * addresses. If this option is turned on and the socket is type
3308 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
3309 * If this option is turned off, then no mapping will be done of V4
3310 * addresses and a user will receive both PF_INET6 and PF_INET type
3311 * addresses on the socket.
static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval,
				    unsigned int optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (val)
		sp->v4mapped = 1;
	else
		sp->v4mapped = 0;

	return 0;
}
3331 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
3332 * This option will get or set the maximum size to put in any outgoing
3333 * SCTP DATA chunk. If a message is larger than this size it will be
3334 * fragmented by SCTP into the specified size. Note that the underlying
3335 * SCTP implementation may fragment into smaller sized chunks when the
3336 * PMTU of the underlying association is smaller than the value set by
3337 * the user. The default value for this option is '0' which indicates
 * the user is NOT limiting fragmentation and only the PMTU will affect
3339 * SCTP's choice of DATA chunk size. Note also that values set larger
3340 * than the maximum size of an IP datagram will effectively let SCTP
3341 * control fragmentation (i.e. the same as setting this option to 0).
3343 * The following structure is used to access and modify this parameter:
3345 * struct sctp_assoc_value {
3346 * sctp_assoc_t assoc_id;
3347 * uint32_t assoc_value;
3350 * assoc_id: This parameter is ignored for one-to-one style sockets.
3351 * For one-to-many style sockets this parameter indicates which
3352 * association the user is performing an action upon. Note that if
3353 * this field's value is zero then the endpoints default value is
 * changed (affecting future associations only).
3355 * assoc_value: This parameter specifies the maximum size in bytes.
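 */
/* Illustrative userspace sketch (not part of this file): capping outgoing
 * DATA chunks at 1200 bytes for the endpoint default, assuming the
 * lksctp-tools <netinet/sctp.h> header:
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_assoc_value av;
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id    = 0;	// endpoint default (future associations)
 *	av.assoc_value = 1200;	// maximum DATA chunk payload, in bytes
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av)) < 0)
 *		perror("SCTP_MAXSEG");
 */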
3357 static int sctp_setsockopt_maxseg(struct sock
*sk
, char __user
*optval
, unsigned int optlen
)
3359 struct sctp_sock
*sp
= sctp_sk(sk
);
3360 struct sctp_assoc_value params
;
3361 struct sctp_association
*asoc
;
3364 if (optlen
== sizeof(int)) {
3365 pr_warn_ratelimited(DEPRECATED
3367 "Use of int in maxseg socket option.\n"
3368 "Use struct sctp_assoc_value instead\n",
3369 current
->comm
, task_pid_nr(current
));
3370 if (copy_from_user(&val
, optval
, optlen
))
3372 params
.assoc_id
= SCTP_FUTURE_ASSOC
;
3373 } else if (optlen
== sizeof(struct sctp_assoc_value
)) {
3374 if (copy_from_user(¶ms
, optval
, optlen
))
3376 val
= params
.assoc_value
;
3381 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
3382 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
3383 sctp_style(sk
, UDP
))
3387 int min_len
, max_len
;
3388 __u16 datasize
= asoc
? sctp_datachk_len(&asoc
->stream
) :
3389 sizeof(struct sctp_data_chunk
);
3391 min_len
= sctp_min_frag_point(sp
, datasize
);
3392 max_len
= SCTP_MAX_CHUNK_LEN
- datasize
;
3394 if (val
< min_len
|| val
> max_len
)
3399 asoc
->user_frag
= val
;
3400 sctp_assoc_update_frag_point(asoc
);
3402 sp
->user_frag
= val
;
3410 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
3412 * Requests that the peer mark the enclosed address as the association
3413 * primary. The enclosed address must be one of the association's
3414 * locally bound addresses. The following structure is used to make a
3415 * set primary request:
3417 static int sctp_setsockopt_peer_primary_addr(struct sock
*sk
, char __user
*optval
,
3418 unsigned int optlen
)
3420 struct net
*net
= sock_net(sk
);
3421 struct sctp_sock
*sp
;
3422 struct sctp_association
*asoc
= NULL
;
3423 struct sctp_setpeerprim prim
;
3424 struct sctp_chunk
*chunk
;
3430 if (!net
->sctp
.addip_enable
)
3433 if (optlen
!= sizeof(struct sctp_setpeerprim
))
3436 if (copy_from_user(&prim
, optval
, optlen
))
3439 asoc
= sctp_id2assoc(sk
, prim
.sspp_assoc_id
);
3443 if (!asoc
->peer
.asconf_capable
)
3446 if (asoc
->peer
.addip_disabled_mask
& SCTP_PARAM_SET_PRIMARY
)
3449 if (!sctp_state(asoc
, ESTABLISHED
))
3452 af
= sctp_get_af_specific(prim
.sspp_addr
.ss_family
);
3456 if (!af
->addr_valid((union sctp_addr
*)&prim
.sspp_addr
, sp
, NULL
))
3457 return -EADDRNOTAVAIL
;
3459 if (!sctp_assoc_lookup_laddr(asoc
, (union sctp_addr
*)&prim
.sspp_addr
))
3460 return -EADDRNOTAVAIL
;
3462 /* Allow security module to validate address. */
3463 err
= security_sctp_bind_connect(sk
, SCTP_SET_PEER_PRIMARY_ADDR
,
3464 (struct sockaddr
*)&prim
.sspp_addr
,
3469 /* Create an ASCONF chunk with SET_PRIMARY parameter */
3470 chunk
= sctp_make_asconf_set_prim(asoc
,
3471 (union sctp_addr
*)&prim
.sspp_addr
);
3475 err
= sctp_send_asconf(asoc
, chunk
);
3477 pr_debug("%s: we set peer primary addr primitively\n", __func__
);
static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
					    unsigned int optlen)
{
	struct sctp_setadaptation adaptation;

	if (optlen != sizeof(struct sctp_setadaptation))
		return -EINVAL;
	if (copy_from_user(&adaptation, optval, optlen))
		return -EFAULT;

	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;

	return 0;
}
3498 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
3500 * The context field in the sctp_sndrcvinfo structure is normally only
3501 * used when a failed message is retrieved holding the value that was
3502 * sent down on the actual send call. This option allows the setting of
3503 * a default context on an association basis that will be received on
3504 * reading messages from the peer. This is especially helpful in the
3505 * one-2-many model for an application to keep some reference to an
3506 * internal state machine that is processing messages on the
 * association. Note that the setting of this value only affects
 * received messages from the peer and does not affect the value that is
3509 * saved with outbound messages.
3511 static int sctp_setsockopt_context(struct sock
*sk
, char __user
*optval
,
3512 unsigned int optlen
)
3514 struct sctp_sock
*sp
= sctp_sk(sk
);
3515 struct sctp_assoc_value params
;
3516 struct sctp_association
*asoc
;
3518 if (optlen
!= sizeof(struct sctp_assoc_value
))
3520 if (copy_from_user(¶ms
, optval
, optlen
))
3523 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
3524 if (!asoc
&& params
.assoc_id
> SCTP_ALL_ASSOC
&&
3525 sctp_style(sk
, UDP
))
3529 asoc
->default_rcv_context
= params
.assoc_value
;
3534 if (params
.assoc_id
== SCTP_FUTURE_ASSOC
||
3535 params
.assoc_id
== SCTP_ALL_ASSOC
)
3536 sp
->default_rcv_context
= params
.assoc_value
;
3538 if (params
.assoc_id
== SCTP_CURRENT_ASSOC
||
3539 params
.assoc_id
== SCTP_ALL_ASSOC
)
3540 list_for_each_entry(asoc
, &sp
->ep
->asocs
, asocs
)
3541 asoc
->default_rcv_context
= params
.assoc_value
;
3547 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
3549 * This options will at a minimum specify if the implementation is doing
3550 * fragmented interleave. Fragmented interleave, for a one to many
3551 * socket, is when subsequent calls to receive a message may return
3552 * parts of messages from different associations. Some implementations
3553 * may allow you to turn this value on or off. If so, when turned off,
3554 * no fragment interleave will occur (which will cause a head of line
3555 * blocking amongst multiple associations sharing the same one to many
3556 * socket). When this option is turned on, then each receive call may
3557 * come from a different association (thus the user must receive data
3558 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
 * association each receive belongs to.)
3561 * This option takes a boolean value. A non-zero value indicates that
3562 * fragmented interleave is on. A value of zero indicates that
3563 * fragmented interleave is off.
3565 * Note that it is important that an implementation that allows this
 * option to be turned on, have it off by default. Otherwise an unaware
 * application using the one to many model may become confused and act
 * incorrectly.
 */
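/* Illustrative userspace sketch (not part of this file): turning fragment
 * interleave on for a one-to-many socket "fd", assuming the lksctp-tools
 * <netinet/sctp.h> header:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int on = 1;	// receive calls may interleave associations
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
 *		       &on, sizeof(on)) < 0)
 *		perror("SCTP_FRAGMENT_INTERLEAVE");
 */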
static int sctp_setsockopt_fragment_interleave(struct sock *sk,
					       char __user *optval,
					       unsigned int optlen)
{
	int val;

	if (optlen != sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->frag_interleave = !!val;

	if (!sctp_sk(sk)->frag_interleave)
		sctp_sk(sk)->strm_interleave = 0;

	return 0;
}
3590 * 8.1.21. Set or Get the SCTP Partial Delivery Point
3591 * (SCTP_PARTIAL_DELIVERY_POINT)
3593 * This option will set or get the SCTP partial delivery point. This
3594 * point is the size of a message where the partial delivery API will be
3595 * invoked to help free up rwnd space for the peer. Setting this to a
3596 * lower value will cause partial deliveries to happen more often. The
3597 * calls argument is an integer that sets or gets the partial delivery
3598 * point. Note also that the call will fail if the user attempts to set
3599 * this value larger than the socket receive buffer size.
3601 * Note that any single message having a length smaller than or equal to
3602 * the SCTP partial delivery point will be delivered in one single read
 * call as long as the user provided buffer is large enough to hold the
 * message.
 */
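/* Illustrative userspace sketch (not part of this file): asking for partial
 * delivery once 16 KB of a message are queued, assuming the lksctp-tools
 * <netinet/sctp.h> header and a value below half of the socket's receive
 * buffer:
 *
 *	#include <stdint.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	uint32_t point = 16384;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
 *		       &point, sizeof(point)) < 0)
 *		perror("SCTP_PARTIAL_DELIVERY_POINT");
 */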
static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
						  char __user *optval,
						  unsigned int optlen)
{
	u32 val;

	if (optlen != sizeof(u32))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	/* Note: We double the receive buffer from what the user sets
	 * it to be, also initial rwnd is based on rcvbuf/2.
	 */
	if (val > (sk->sk_rcvbuf >> 1))
		return -EINVAL;

	sctp_sk(sk)->pd_point = val;

	return 0; /* is this the right error code? */
}
3629 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
3631 * This option will allow a user to change the maximum burst of packets
3632 * that can be emitted by this association. Note that the default value
3633 * is 4, and some implementations may restrict this setting so that it
3634 * can only be lowered.
3636 * NOTE: This text doesn't seem right. Do this on a socket basis with
3637 * future associations inheriting the socket value.
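 */
/* Illustrative userspace sketch (not part of this file): lowering the
 * maximum burst to 2 packets for the endpoint default, assuming the
 * lksctp-tools <netinet/sctp.h> header:
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_assoc_value av;
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id    = 0;	// endpoint default (future associations)
 *	av.assoc_value = 2;	// packets per burst
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST, &av, sizeof(av)) < 0)
 *		perror("SCTP_MAX_BURST");
 */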
3639 static int sctp_setsockopt_maxburst(struct sock
*sk
,
3640 char __user
*optval
,
3641 unsigned int optlen
)
3643 struct sctp_sock
*sp
= sctp_sk(sk
);
3644 struct sctp_assoc_value params
;
3645 struct sctp_association
*asoc
;
3647 if (optlen
== sizeof(int)) {
3648 pr_warn_ratelimited(DEPRECATED
3650 "Use of int in max_burst socket option deprecated.\n"
3651 "Use struct sctp_assoc_value instead\n",
3652 current
->comm
, task_pid_nr(current
));
3653 if (copy_from_user(¶ms
.assoc_value
, optval
, optlen
))
3655 params
.assoc_id
= SCTP_FUTURE_ASSOC
;
3656 } else if (optlen
== sizeof(struct sctp_assoc_value
)) {
3657 if (copy_from_user(¶ms
, optval
, optlen
))
3662 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
3663 if (!asoc
&& params
.assoc_id
> SCTP_ALL_ASSOC
&&
3664 sctp_style(sk
, UDP
))
3668 asoc
->max_burst
= params
.assoc_value
;
3673 if (params
.assoc_id
== SCTP_FUTURE_ASSOC
||
3674 params
.assoc_id
== SCTP_ALL_ASSOC
)
3675 sp
->max_burst
= params
.assoc_value
;
3677 if (params
.assoc_id
== SCTP_CURRENT_ASSOC
||
3678 params
.assoc_id
== SCTP_ALL_ASSOC
)
3679 list_for_each_entry(asoc
, &sp
->ep
->asocs
, asocs
)
3680 asoc
->max_burst
= params
.assoc_value
;
3686 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
3688 * This set option adds a chunk type that the user is requesting to be
3689 * received only in an authenticated way. Changes to the list of chunks
 * will only affect future associations on the socket.
3692 static int sctp_setsockopt_auth_chunk(struct sock
*sk
,
3693 char __user
*optval
,
3694 unsigned int optlen
)
3696 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
3697 struct sctp_authchunk val
;
3699 if (!ep
->auth_enable
)
3702 if (optlen
!= sizeof(struct sctp_authchunk
))
3704 if (copy_from_user(&val
, optval
, optlen
))
3707 switch (val
.sauth_chunk
) {
3709 case SCTP_CID_INIT_ACK
:
3710 case SCTP_CID_SHUTDOWN_COMPLETE
:
3715 /* add this chunk id to the endpoint */
3716 return sctp_auth_ep_add_chunkid(ep
, val
.sauth_chunk
);
3720 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
3722 * This option gets or sets the list of HMAC algorithms that the local
3723 * endpoint requires the peer to use.
3725 static int sctp_setsockopt_hmac_ident(struct sock
*sk
,
3726 char __user
*optval
,
3727 unsigned int optlen
)
3729 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
3730 struct sctp_hmacalgo
*hmacs
;
3734 if (!ep
->auth_enable
)
3737 if (optlen
< sizeof(struct sctp_hmacalgo
))
3739 optlen
= min_t(unsigned int, optlen
, sizeof(struct sctp_hmacalgo
) +
3740 SCTP_AUTH_NUM_HMACS
* sizeof(u16
));
3742 hmacs
= memdup_user(optval
, optlen
);
3744 return PTR_ERR(hmacs
);
3746 idents
= hmacs
->shmac_num_idents
;
3747 if (idents
== 0 || idents
> SCTP_AUTH_NUM_HMACS
||
3748 (idents
* sizeof(u16
)) > (optlen
- sizeof(struct sctp_hmacalgo
))) {
3753 err
= sctp_auth_ep_set_hmacs(ep
, hmacs
);
3760 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
3762 * This option will set a shared secret key which is used to build an
3763 * association shared key.
3765 static int sctp_setsockopt_auth_key(struct sock
*sk
,
3766 char __user
*optval
,
3767 unsigned int optlen
)
3769 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
3770 struct sctp_authkey
*authkey
;
3771 struct sctp_association
*asoc
;
3774 if (!ep
->auth_enable
)
3777 if (optlen
<= sizeof(struct sctp_authkey
))
3779 /* authkey->sca_keylength is u16, so optlen can't be bigger than
3782 optlen
= min_t(unsigned int, optlen
, USHRT_MAX
+ sizeof(*authkey
));
3784 authkey
= memdup_user(optval
, optlen
);
3785 if (IS_ERR(authkey
))
3786 return PTR_ERR(authkey
);
3788 if (authkey
->sca_keylength
> optlen
- sizeof(*authkey
))
3791 asoc
= sctp_id2assoc(sk
, authkey
->sca_assoc_id
);
3792 if (!asoc
&& authkey
->sca_assoc_id
> SCTP_ALL_ASSOC
&&
3793 sctp_style(sk
, UDP
))
3797 ret
= sctp_auth_set_key(ep
, asoc
, authkey
);
3801 if (authkey
->sca_assoc_id
== SCTP_FUTURE_ASSOC
||
3802 authkey
->sca_assoc_id
== SCTP_ALL_ASSOC
) {
3803 ret
= sctp_auth_set_key(ep
, asoc
, authkey
);
3810 if (authkey
->sca_assoc_id
== SCTP_CURRENT_ASSOC
||
3811 authkey
->sca_assoc_id
== SCTP_ALL_ASSOC
) {
3812 list_for_each_entry(asoc
, &ep
->asocs
, asocs
) {
3813 int res
= sctp_auth_set_key(ep
, asoc
, authkey
);
3826 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
3828 * This option will get or set the active shared key to be used to build
3829 * the association shared key.
3831 static int sctp_setsockopt_active_key(struct sock
*sk
,
3832 char __user
*optval
,
3833 unsigned int optlen
)
3835 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
3836 struct sctp_association
*asoc
;
3837 struct sctp_authkeyid val
;
3840 if (!ep
->auth_enable
)
3843 if (optlen
!= sizeof(struct sctp_authkeyid
))
3845 if (copy_from_user(&val
, optval
, optlen
))
3848 asoc
= sctp_id2assoc(sk
, val
.scact_assoc_id
);
3849 if (!asoc
&& val
.scact_assoc_id
> SCTP_ALL_ASSOC
&&
3850 sctp_style(sk
, UDP
))
3854 return sctp_auth_set_active_key(ep
, asoc
, val
.scact_keynumber
);
3856 if (val
.scact_assoc_id
== SCTP_FUTURE_ASSOC
||
3857 val
.scact_assoc_id
== SCTP_ALL_ASSOC
) {
3858 ret
= sctp_auth_set_active_key(ep
, asoc
, val
.scact_keynumber
);
3863 if (val
.scact_assoc_id
== SCTP_CURRENT_ASSOC
||
3864 val
.scact_assoc_id
== SCTP_ALL_ASSOC
) {
3865 list_for_each_entry(asoc
, &ep
->asocs
, asocs
) {
3866 int res
= sctp_auth_set_active_key(ep
, asoc
,
3867 val
.scact_keynumber
);
3878 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
3880 * This set option will delete a shared secret key from use.
3882 static int sctp_setsockopt_del_key(struct sock
*sk
,
3883 char __user
*optval
,
3884 unsigned int optlen
)
3886 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
3887 struct sctp_association
*asoc
;
3888 struct sctp_authkeyid val
;
3891 if (!ep
->auth_enable
)
3894 if (optlen
!= sizeof(struct sctp_authkeyid
))
3896 if (copy_from_user(&val
, optval
, optlen
))
3899 asoc
= sctp_id2assoc(sk
, val
.scact_assoc_id
);
3900 if (!asoc
&& val
.scact_assoc_id
> SCTP_ALL_ASSOC
&&
3901 sctp_style(sk
, UDP
))
3905 return sctp_auth_del_key_id(ep
, asoc
, val
.scact_keynumber
);
3907 if (val
.scact_assoc_id
== SCTP_FUTURE_ASSOC
||
3908 val
.scact_assoc_id
== SCTP_ALL_ASSOC
) {
3909 ret
= sctp_auth_del_key_id(ep
, asoc
, val
.scact_keynumber
);
3914 if (val
.scact_assoc_id
== SCTP_CURRENT_ASSOC
||
3915 val
.scact_assoc_id
== SCTP_ALL_ASSOC
) {
3916 list_for_each_entry(asoc
, &ep
->asocs
, asocs
) {
3917 int res
= sctp_auth_del_key_id(ep
, asoc
,
3918 val
.scact_keynumber
);
3929 * 8.3.4 Deactivate a Shared Key (SCTP_AUTH_DEACTIVATE_KEY)
3931 * This set option will deactivate a shared secret key.
3933 static int sctp_setsockopt_deactivate_key(struct sock
*sk
, char __user
*optval
,
3934 unsigned int optlen
)
3936 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
3937 struct sctp_association
*asoc
;
3938 struct sctp_authkeyid val
;
3941 if (!ep
->auth_enable
)
3944 if (optlen
!= sizeof(struct sctp_authkeyid
))
3946 if (copy_from_user(&val
, optval
, optlen
))
3949 asoc
= sctp_id2assoc(sk
, val
.scact_assoc_id
);
3950 if (!asoc
&& val
.scact_assoc_id
> SCTP_ALL_ASSOC
&&
3951 sctp_style(sk
, UDP
))
3955 return sctp_auth_deact_key_id(ep
, asoc
, val
.scact_keynumber
);
3957 if (val
.scact_assoc_id
== SCTP_FUTURE_ASSOC
||
3958 val
.scact_assoc_id
== SCTP_ALL_ASSOC
) {
3959 ret
= sctp_auth_deact_key_id(ep
, asoc
, val
.scact_keynumber
);
3964 if (val
.scact_assoc_id
== SCTP_CURRENT_ASSOC
||
3965 val
.scact_assoc_id
== SCTP_ALL_ASSOC
) {
3966 list_for_each_entry(asoc
, &ep
->asocs
, asocs
) {
3967 int res
= sctp_auth_deact_key_id(ep
, asoc
,
3968 val
.scact_keynumber
);
3979 * 8.1.23 SCTP_AUTO_ASCONF
3981 * This option will enable or disable the use of the automatic generation of
3982 * ASCONF chunks to add and delete addresses to an existing association. Note
3983 * that this option has two caveats namely: a) it only affects sockets that
3984 * are bound to all addresses available to the SCTP stack, and b) the system
3985 * administrator may have an overriding control that turns the ASCONF feature
3986 * off no matter what setting the socket option may have.
3987 * This option expects an integer boolean flag, where a non-zero value turns on
3988 * the option, and a zero value turns off the option.
 * Note: in this implementation, the per-socket setting overrides the default
 * parameter set by sysctl, as it does in the FreeBSD implementation.
 */
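/* Illustrative userspace sketch (not part of this file): enabling automatic
 * ASCONF on a socket bound to all addresses, assuming the lksctp-tools
 * <netinet/sctp.h> header:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int on = 1;	// only valid when the socket is bound to INADDR_ANY
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on, sizeof(on)) < 0)
 *		perror("SCTP_AUTO_ASCONF");
 */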
static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
				       unsigned int optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;
	if (!sctp_is_ep_boundall(sk) && val)
		return -EINVAL;
	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
		return 0;

	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
	if (val == 0 && sp->do_auto_asconf) {
		list_del(&sp->auto_asconf_list);
		sp->do_auto_asconf = 0;
	} else if (val && !sp->do_auto_asconf) {
		list_add_tail(&sp->auto_asconf_list,
		    &sock_net(sk)->sctp.auto_asconf_splist);
		sp->do_auto_asconf = 1;
	}
	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
	return 0;
}
4021 * SCTP_PEER_ADDR_THLDS
4023 * This option allows us to alter the partially failed threshold for one or all
4024 * transports in an association. See Section 6.1 of:
4025 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
4027 static int sctp_setsockopt_paddr_thresholds(struct sock
*sk
,
4028 char __user
*optval
,
4029 unsigned int optlen
)
4031 struct sctp_paddrthlds val
;
4032 struct sctp_transport
*trans
;
4033 struct sctp_association
*asoc
;
4035 if (optlen
< sizeof(struct sctp_paddrthlds
))
4037 if (copy_from_user(&val
, (struct sctp_paddrthlds __user
*)optval
,
4038 sizeof(struct sctp_paddrthlds
)))
4041 if (!sctp_is_any(sk
, (const union sctp_addr
*)&val
.spt_address
)) {
4042 trans
= sctp_addr_id2transport(sk
, &val
.spt_address
,
4047 if (val
.spt_pathmaxrxt
)
4048 trans
->pathmaxrxt
= val
.spt_pathmaxrxt
;
4049 trans
->pf_retrans
= val
.spt_pathpfthld
;
4054 asoc
= sctp_id2assoc(sk
, val
.spt_assoc_id
);
4055 if (!asoc
&& val
.spt_assoc_id
!= SCTP_FUTURE_ASSOC
&&
4056 sctp_style(sk
, UDP
))
4060 list_for_each_entry(trans
, &asoc
->peer
.transport_addr_list
,
4062 if (val
.spt_pathmaxrxt
)
4063 trans
->pathmaxrxt
= val
.spt_pathmaxrxt
;
4064 trans
->pf_retrans
= val
.spt_pathpfthld
;
4067 if (val
.spt_pathmaxrxt
)
4068 asoc
->pathmaxrxt
= val
.spt_pathmaxrxt
;
4069 asoc
->pf_retrans
= val
.spt_pathpfthld
;
4071 struct sctp_sock
*sp
= sctp_sk(sk
);
4073 if (val
.spt_pathmaxrxt
)
4074 sp
->pathmaxrxt
= val
.spt_pathmaxrxt
;
4075 sp
->pf_retrans
= val
.spt_pathpfthld
;
static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;

	return 0;
}
static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;

	return 0;
}
static int sctp_setsockopt_pr_supported(struct sock *sk,
					char __user *optval,
					unsigned int optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;

	if (optlen != sizeof(params))
		return -EINVAL;

	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;

	return 0;
}
4136 static int sctp_setsockopt_default_prinfo(struct sock
*sk
,
4137 char __user
*optval
,
4138 unsigned int optlen
)
4140 struct sctp_sock
*sp
= sctp_sk(sk
);
4141 struct sctp_default_prinfo info
;
4142 struct sctp_association
*asoc
;
4143 int retval
= -EINVAL
;
4145 if (optlen
!= sizeof(info
))
4148 if (copy_from_user(&info
, optval
, sizeof(info
))) {
4153 if (info
.pr_policy
& ~SCTP_PR_SCTP_MASK
)
4156 if (info
.pr_policy
== SCTP_PR_SCTP_NONE
)
4159 asoc
= sctp_id2assoc(sk
, info
.pr_assoc_id
);
4160 if (!asoc
&& info
.pr_assoc_id
> SCTP_ALL_ASSOC
&&
4161 sctp_style(sk
, UDP
))
4167 SCTP_PR_SET_POLICY(asoc
->default_flags
, info
.pr_policy
);
4168 asoc
->default_timetolive
= info
.pr_value
;
4172 if (info
.pr_assoc_id
== SCTP_FUTURE_ASSOC
||
4173 info
.pr_assoc_id
== SCTP_ALL_ASSOC
) {
4174 SCTP_PR_SET_POLICY(sp
->default_flags
, info
.pr_policy
);
4175 sp
->default_timetolive
= info
.pr_value
;
4178 if (info
.pr_assoc_id
== SCTP_CURRENT_ASSOC
||
4179 info
.pr_assoc_id
== SCTP_ALL_ASSOC
) {
4180 list_for_each_entry(asoc
, &sp
->ep
->asocs
, asocs
) {
4181 SCTP_PR_SET_POLICY(asoc
->default_flags
, info
.pr_policy
);
4182 asoc
->default_timetolive
= info
.pr_value
;
4190 static int sctp_setsockopt_reconfig_supported(struct sock
*sk
,
4191 char __user
*optval
,
4192 unsigned int optlen
)
4194 struct sctp_assoc_value params
;
4195 struct sctp_association
*asoc
;
4196 int retval
= -EINVAL
;
4198 if (optlen
!= sizeof(params
))
4201 if (copy_from_user(¶ms
, optval
, optlen
)) {
4206 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
4207 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
4208 sctp_style(sk
, UDP
))
4212 asoc
->reconf_enable
= !!params
.assoc_value
;
4214 sctp_sk(sk
)->ep
->reconf_enable
= !!params
.assoc_value
;
4222 static int sctp_setsockopt_enable_strreset(struct sock
*sk
,
4223 char __user
*optval
,
4224 unsigned int optlen
)
4226 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
4227 struct sctp_assoc_value params
;
4228 struct sctp_association
*asoc
;
4229 int retval
= -EINVAL
;
4231 if (optlen
!= sizeof(params
))
4234 if (copy_from_user(¶ms
, optval
, optlen
)) {
4239 if (params
.assoc_value
& (~SCTP_ENABLE_STRRESET_MASK
))
4242 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
4243 if (!asoc
&& params
.assoc_id
> SCTP_ALL_ASSOC
&&
4244 sctp_style(sk
, UDP
))
4250 asoc
->strreset_enable
= params
.assoc_value
;
4254 if (params
.assoc_id
== SCTP_FUTURE_ASSOC
||
4255 params
.assoc_id
== SCTP_ALL_ASSOC
)
4256 ep
->strreset_enable
= params
.assoc_value
;
4258 if (params
.assoc_id
== SCTP_CURRENT_ASSOC
||
4259 params
.assoc_id
== SCTP_ALL_ASSOC
)
4260 list_for_each_entry(asoc
, &ep
->asocs
, asocs
)
4261 asoc
->strreset_enable
= params
.assoc_value
;
static int sctp_setsockopt_reset_streams(struct sock *sk,
					 char __user *optval,
					 unsigned int optlen)
{
	struct sctp_reset_streams *params;
	struct sctp_association *asoc;
	int retval = -EINVAL;

	if (optlen < sizeof(*params))
		return -EINVAL;
	/* srs_number_streams is u16, so optlen can't be bigger than this. */
	optlen = min_t(unsigned int, optlen, USHRT_MAX +
					     sizeof(__u16) * sizeof(*params));

	params = memdup_user(optval, optlen);
	if (IS_ERR(params))
		return PTR_ERR(params);

	if (params->srs_number_streams * sizeof(__u16) >
	    optlen - sizeof(*params))
		goto out;

	asoc = sctp_id2assoc(sk, params->srs_assoc_id);
	if (!asoc)
		goto out;

	retval = sctp_send_reset_streams(asoc, params);

out:
	kfree(params);
	return retval;
}
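/* Example (illustrative only): a user-space sketch of requesting an
 * outgoing reset of two streams, assuming stream reconfiguration has been
 * enabled with SCTP_ENABLE_STREAM_RESET and that the peer supports it.
 * The variable-length srs_stream_list[] is why the handler above uses
 * memdup_user() and re-checks srs_number_streams against optlen.
 *
 *	struct sctp_reset_streams *srs;
 *	size_t len = sizeof(*srs) + 2 * sizeof(uint16_t);
 *
 *	srs = calloc(1, len);
 *	srs->srs_assoc_id	= assoc_id;
 *	srs->srs_flags		= SCTP_STREAM_RESET_OUTGOING;
 *	srs->srs_number_streams = 2;
 *	srs->srs_stream_list[0] = 0;
 *	srs->srs_stream_list[1] = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_RESET_STREAMS, srs, len);
 *	free(srs);
 */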
static int sctp_setsockopt_reset_assoc(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	struct sctp_association *asoc;
	sctp_assoc_t associd;
	int retval = -EINVAL;

	if (optlen != sizeof(associd))
		goto out;

	if (copy_from_user(&associd, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	asoc = sctp_id2assoc(sk, associd);
	if (!asoc)
		goto out;

	retval = sctp_send_reset_assoc(asoc);

out:
	return retval;
}
static int sctp_setsockopt_add_streams(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	struct sctp_association *asoc;
	struct sctp_add_streams params;
	int retval = -EINVAL;

	if (optlen != sizeof(params))
		goto out;

	if (copy_from_user(&params, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	asoc = sctp_id2assoc(sk, params.sas_assoc_id);
	if (!asoc)
		goto out;

	retval = sctp_send_add_streams(asoc, &params);

out:
	return retval;
}
static int sctp_setsockopt_scheduler(struct sock *sk,
				     char __user *optval,
				     unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_assoc_value params;
	int retval = 0;

	if (optlen < sizeof(params))
		return -EINVAL;

	optlen = sizeof(params);
	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	if (params.assoc_value > SCTP_SS_MAX)
		return -EINVAL;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		return sctp_sched_set_sched(asoc, params.assoc_value);

	if (params.assoc_id == SCTP_FUTURE_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		sp->default_ss = params.assoc_value;

	if (params.assoc_id == SCTP_CURRENT_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
			int ret = sctp_sched_set_sched(asoc,
						       params.assoc_value);

			if (ret && !retval)
				retval = ret;
		}
	}

	return retval;
}
static int sctp_setsockopt_scheduler_value(struct sock *sk,
					   char __user *optval,
					   unsigned int optlen)
{
	struct sctp_stream_value params;
	struct sctp_association *asoc;
	int retval = -EINVAL;

	if (optlen < sizeof(params))
		goto out;

	optlen = sizeof(params);
	if (copy_from_user(&params, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_CURRENT_ASSOC &&
	    sctp_style(sk, UDP))
		goto out;

	if (asoc) {
		retval = sctp_sched_set_value(asoc, params.stream_id,
					      params.stream_value, GFP_KERNEL);
		goto out;
	}

	retval = 0;

	list_for_each_entry(asoc, &sctp_sk(sk)->ep->asocs, asocs) {
		int ret = sctp_sched_set_value(asoc, params.stream_id,
					       params.stream_value, GFP_KERNEL);
		if (ret && !retval) /* try to return the 1st error. */
			retval = ret;
	}

out:
	return retval;
}
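/* Example (illustrative only): a user-space sketch of picking a stream
 * scheduler and then weighting one stream, assuming uapi headers that
 * define SCTP_SS_PRIO and SCTP_FUTURE_ASSOC; the stream id and the
 * priority value are arbitrary.
 *
 *	struct sctp_assoc_value sched = {
 *		.assoc_id    = SCTP_FUTURE_ASSOC,
 *		.assoc_value = SCTP_SS_PRIO,
 *	};
 *	struct sctp_stream_value weight = {
 *		.assoc_id     = assoc_id,
 *		.stream_id    = 1,
 *		.stream_value = 10,	// scheduler-specific value
 *	};
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
 *		   &sched, sizeof(sched));
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
 *		   &weight, sizeof(weight));
 */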
static int sctp_setsockopt_interleaving_supported(struct sock *sk,
						  char __user *optval,
						  unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EINVAL;

	if (optlen < sizeof(params))
		goto out;

	optlen = sizeof(params);
	if (copy_from_user(&params, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		goto out;

	if (!sock_net(sk)->sctp.intl_enable || !sp->frag_interleave) {
		retval = -EPERM;
		goto out;
	}

	sp->strm_interleave = !!params.assoc_value;

	retval = 0;

out:
	return retval;
}
static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
				      unsigned int optlen)
{
	int val;

	if (!sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (sctp_sk(sk)->ep->base.bind_addr.port)
		return -EFAULT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->reuse = !!val;

	return 0;
}
static int sctp_assoc_ulpevent_type_set(struct sctp_event *param,
					struct sctp_association *asoc)
{
	struct sctp_ulpevent *event;

	sctp_ulpevent_type_set(&asoc->subscribe, param->se_type, param->se_on);

	if (param->se_type == SCTP_SENDER_DRY_EVENT && param->se_on) {
		if (sctp_outq_is_empty(&asoc->outqueue)) {
			event = sctp_ulpevent_make_sender_dry_event(asoc,
					GFP_USER | __GFP_NOWARN);
			if (!event)
				return -ENOMEM;

			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
		}
	}

	return 0;
}
static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
				 unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_event param;
	int retval = 0;

	if (optlen < sizeof(param))
		return -EINVAL;

	optlen = sizeof(param);
	if (copy_from_user(&param, optval, optlen))
		return -EFAULT;

	if (param.se_type < SCTP_SN_TYPE_BASE ||
	    param.se_type > SCTP_SN_TYPE_MAX)
		return -EINVAL;

	asoc = sctp_id2assoc(sk, param.se_assoc_id);
	if (!asoc && param.se_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		return sctp_assoc_ulpevent_type_set(&param, asoc);

	if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
	    param.se_assoc_id == SCTP_ALL_ASSOC)
		sctp_ulpevent_type_set(&sp->subscribe,
				       param.se_type, param.se_on);

	if (param.se_assoc_id == SCTP_CURRENT_ASSOC ||
	    param.se_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
			int ret = sctp_assoc_ulpevent_type_set(&param, asoc);

			if (ret && !retval)
				retval = ret;
		}
	}

	return retval;
}
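/* Example (illustrative only): a user-space sketch of subscribing to a
 * single notification with the RFC 6458 SCTP_EVENT option, assuming
 * headers that define struct sctp_event (se_type/se_on/se_assoc_id).
 *
 *	struct sctp_event ev = {
 *		.se_assoc_id = SCTP_FUTURE_ASSOC,
 *		.se_type     = SCTP_ASSOC_CHANGE,
 *		.se_on       = 1,
 *	};
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
 */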
/* API 6.2 setsockopt(), getsockopt()
 *
 * Applications use setsockopt() and getsockopt() to set or retrieve
 * socket options.  Socket options are used to change the default
 * behavior of sockets calls.  They are described in Section 7.
 *
 * The syntax is:
 *
 *   ret = getsockopt(int sd, int level, int optname, void __user *optval,
 *                    int __user *optlen);
 *   ret = setsockopt(int sd, int level, int optname, const void __user *optval,
 *                    int optlen);
 *
 *   sd      - the socket descriptor.
 *   level   - set to IPPROTO_SCTP for all SCTP options.
 *   optname - the option name.
 *   optval  - the buffer to store the value of the option.
 *   optlen  - the size of the buffer.
 */
static int sctp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	int retval = 0;

	pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);

	/* I can hardly begin to describe how wrong this is.  This is
	 * so broken as to be worse than useless.  The API draft
	 * REALLY is NOT helpful here...  I am not convinced that the
	 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
	 * are at all well-founded.
	 */
	if (level != SOL_SCTP) {
		struct sctp_af *af = sctp_sk(sk)->pf->af;

		retval = af->setsockopt(sk, level, optname, optval, optlen);
		goto out_nounlock;
	}

	lock_sock(sk);

	switch (optname) {
	case SCTP_SOCKOPT_BINDX_ADD:
		/* 'optlen' is the size of the addresses buffer. */
		retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
					       optlen, SCTP_BINDX_ADD_ADDR);
		break;
	case SCTP_SOCKOPT_BINDX_REM:
		/* 'optlen' is the size of the addresses buffer. */
		retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
					       optlen, SCTP_BINDX_REM_ADDR);
		break;
	case SCTP_SOCKOPT_CONNECTX_OLD:
		/* 'optlen' is the size of the addresses buffer. */
		retval = sctp_setsockopt_connectx_old(sk,
					    (struct sockaddr __user *)optval,
					    optlen);
		break;
	case SCTP_SOCKOPT_CONNECTX:
		/* 'optlen' is the size of the addresses buffer. */
		retval = sctp_setsockopt_connectx(sk,
					    (struct sockaddr __user *)optval,
					    optlen);
		break;
	case SCTP_DISABLE_FRAGMENTS:
		retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
		break;
	case SCTP_EVENTS:
		retval = sctp_setsockopt_events(sk, optval, optlen);
		break;
	case SCTP_AUTOCLOSE:
		retval = sctp_setsockopt_autoclose(sk, optval, optlen);
		break;
	case SCTP_PEER_ADDR_PARAMS:
		retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
		break;
	case SCTP_DELAYED_SACK:
		retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
		break;
	case SCTP_PARTIAL_DELIVERY_POINT:
		retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
		break;
	case SCTP_INITMSG:
		retval = sctp_setsockopt_initmsg(sk, optval, optlen);
		break;
	case SCTP_DEFAULT_SEND_PARAM:
		retval = sctp_setsockopt_default_send_param(sk, optval,
							    optlen);
		break;
	case SCTP_DEFAULT_SNDINFO:
		retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
		break;
	case SCTP_PRIMARY_ADDR:
		retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
		break;
	case SCTP_SET_PEER_PRIMARY_ADDR:
		retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
		break;
	case SCTP_NODELAY:
		retval = sctp_setsockopt_nodelay(sk, optval, optlen);
		break;
	case SCTP_RTOINFO:
		retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
		break;
	case SCTP_ASSOCINFO:
		retval = sctp_setsockopt_associnfo(sk, optval, optlen);
		break;
	case SCTP_I_WANT_MAPPED_V4_ADDR:
		retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
		break;
	case SCTP_MAXSEG:
		retval = sctp_setsockopt_maxseg(sk, optval, optlen);
		break;
	case SCTP_ADAPTATION_LAYER:
		retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
		break;
	case SCTP_CONTEXT:
		retval = sctp_setsockopt_context(sk, optval, optlen);
		break;
	case SCTP_FRAGMENT_INTERLEAVE:
		retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
		break;
	case SCTP_MAX_BURST:
		retval = sctp_setsockopt_maxburst(sk, optval, optlen);
		break;
	case SCTP_AUTH_CHUNK:
		retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
		break;
	case SCTP_HMAC_IDENT:
		retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
		break;
	case SCTP_AUTH_KEY:
		retval = sctp_setsockopt_auth_key(sk, optval, optlen);
		break;
	case SCTP_AUTH_ACTIVE_KEY:
		retval = sctp_setsockopt_active_key(sk, optval, optlen);
		break;
	case SCTP_AUTH_DELETE_KEY:
		retval = sctp_setsockopt_del_key(sk, optval, optlen);
		break;
	case SCTP_AUTH_DEACTIVATE_KEY:
		retval = sctp_setsockopt_deactivate_key(sk, optval, optlen);
		break;
	case SCTP_AUTO_ASCONF:
		retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
		break;
	case SCTP_PEER_ADDR_THLDS:
		retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
		break;
	case SCTP_RECVRCVINFO:
		retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
		break;
	case SCTP_RECVNXTINFO:
		retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
		break;
	case SCTP_PR_SUPPORTED:
		retval = sctp_setsockopt_pr_supported(sk, optval, optlen);
		break;
	case SCTP_DEFAULT_PRINFO:
		retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
		break;
	case SCTP_RECONFIG_SUPPORTED:
		retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
		break;
	case SCTP_ENABLE_STREAM_RESET:
		retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
		break;
	case SCTP_RESET_STREAMS:
		retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
		break;
	case SCTP_RESET_ASSOC:
		retval = sctp_setsockopt_reset_assoc(sk, optval, optlen);
		break;
	case SCTP_ADD_STREAMS:
		retval = sctp_setsockopt_add_streams(sk, optval, optlen);
		break;
	case SCTP_STREAM_SCHEDULER:
		retval = sctp_setsockopt_scheduler(sk, optval, optlen);
		break;
	case SCTP_STREAM_SCHEDULER_VALUE:
		retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
		break;
	case SCTP_INTERLEAVING_SUPPORTED:
		retval = sctp_setsockopt_interleaving_supported(sk, optval,
								optlen);
		break;
	case SCTP_REUSE_PORT:
		retval = sctp_setsockopt_reuse_port(sk, optval, optlen);
		break;
	case SCTP_EVENT:
		retval = sctp_setsockopt_event(sk, optval, optlen);
		break;
	default:
		retval = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);

out_nounlock:
	return retval;
}
/* API 3.1.6 connect() - UDP Style Syntax
 *
 * An application may use the connect() call in the UDP model to initiate an
 * association without sending data.
 *
 * The syntax is:
 *
 *   ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
 *
 *   sd:  the socket descriptor to have a new association added to.
 *   nam: the address structure (either struct sockaddr_in or struct
 *        sockaddr_in6 defined in RFC2553 [7]).
 *   len: the size of the address.
 */
static int sctp_connect(struct sock *sk, struct sockaddr *addr,
			int addr_len, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sctp_af *af;
	int err = -EINVAL;

	lock_sock(sk);
	pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* We may need to bind the socket. */
	if (!inet->inet_num) {
		if (sk->sk_prot->get_port(sk, 0)) {
			release_sock(sk);
			return -EAGAIN;
		}
		inet->inet_sport = htons(inet->inet_num);
	}

	/* Validate addr_len before calling common connect/connectx routine. */
	af = sctp_get_af_specific(addr->sa_family);
	if (!af || addr_len < af->sockaddr_len) {
		err = -EINVAL;
	} else {
		/* Pass correct addr len to common routine (so it knows there
		 * is only one address being passed).
		 */
		err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
	}

	release_sock(sk);
	return err;
}
int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
		      int addr_len, int flags)
{
	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;

	if (uaddr->sa_family == AF_UNSPEC)
		return -EOPNOTSUPP;

	return sctp_connect(sock->sk, uaddr, addr_len, flags);
}
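/* Example (illustrative only): a user-space sketch of the UDP-style
 * connect() path handled above, assuming IPv4 and an arbitrary peer
 * address and port; the association is set up without sending data.
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in peer = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *	connect(sd, (struct sockaddr *)&peer, sizeof(peer));
 */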
/* FIXME: Write comments. */
static int sctp_disconnect(struct sock *sk, int flags)
{
	return -EOPNOTSUPP; /* STUB */
}
/* 4.1.4 accept() - TCP Style Syntax
 *
 * Applications use the accept() call to remove an established SCTP
 * association from the accept queue of the endpoint.  A new socket
 * descriptor will be returned from accept() to represent the newly
 * formed association.
 */
static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sock *newsk = NULL;
	struct sctp_association *asoc;
	long timeo;
	int error = 0;

	lock_sock(sk);

	sp = sctp_sk(sk);
	ep = sp->ep;

	if (!sctp_style(sk, TCP)) {
		error = -EOPNOTSUPP;
		goto out;
	}

	if (!sctp_sstate(sk, LISTENING)) {
		error = -EINVAL;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	error = sctp_wait_for_accept(sk, timeo);
	if (error)
		goto out;

	/* We treat the list of associations on the endpoint as the accept
	 * queue and pick the first association on the list.
	 */
	asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);

	newsk = sp->pf->create_accept_sk(sk, asoc, kern);
	if (!newsk) {
		error = -ENOMEM;
		goto out;
	}

	/* Populate the fields of the newsk from the oldsk and migrate the
	 * asoc to the newsk.
	 */
	error = sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
	if (error) {
		sk_common_release(newsk);
		newsk = NULL;
	}

out:
	release_sock(sk);
	*err = error;
	return newsk;
}
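/* Example (illustrative only): a user-space sketch of the TCP-style
 * accept() path above, assuming IPv4 and an arbitrary local port; each
 * accepted descriptor represents one established association.
 *
 *	int lsd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	struct sockaddr_in local = {
 *		.sin_family      = AF_INET,
 *		.sin_port        = htons(5000),
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *	};
 *
 *	bind(lsd, (struct sockaddr *)&local, sizeof(local));
 *	listen(lsd, 5);
 *	int csd = accept(lsd, NULL, NULL);
 */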
/* The SCTP ioctl handler. */
static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	/*
	 * SEQPACKET-style sockets in LISTENING state are valid, for
	 * SCTP, so only discard TCP-style sockets in LISTENING state.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned int amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
		break;
	}
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}
/* This is the function which gets called during socket creation to
 * initialized the SCTP-specific portion of the sock.
 * The sock structure should already be zero-filled memory.
 */
static int sctp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;

	pr_debug("%s: sk:%p\n", __func__, sk);

	sp = sctp_sk(sk);

	/* Initialize the SCTP per socket area.  */
	switch (sk->sk_type) {
	case SOCK_SEQPACKET:
		sp->type = SCTP_SOCKET_UDP;
		break;
	case SOCK_STREAM:
		sp->type = SCTP_SOCKET_TCP;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk->sk_gso_type = SKB_GSO_SCTP;

	/* Initialize default send parameters. These parameters can be
	 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
	 */
	sp->default_stream = 0;
	sp->default_ppid = 0;
	sp->default_flags = 0;
	sp->default_context = 0;
	sp->default_timetolive = 0;

	sp->default_rcv_context = 0;
	sp->max_burst = net->sctp.max_burst;

	sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;

	/* Initialize default setup parameters. These parameters
	 * can be modified with the SCTP_INITMSG socket option or
	 * overridden by the SCTP_INIT CMSG.
	 */
	sp->initmsg.sinit_num_ostreams   = sctp_max_outstreams;
	sp->initmsg.sinit_max_instreams  = sctp_max_instreams;
	sp->initmsg.sinit_max_attempts   = net->sctp.max_retrans_init;
	sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;

	/* Initialize default RTO related parameters.  These parameters can
	 * be modified for with the SCTP_RTOINFO socket option.
	 */
	sp->rtoinfo.srto_initial = net->sctp.rto_initial;
	sp->rtoinfo.srto_max     = net->sctp.rto_max;
	sp->rtoinfo.srto_min     = net->sctp.rto_min;

	/* Initialize default association related parameters. These parameters
	 * can be modified with the SCTP_ASSOCINFO socket option.
	 */
	sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
	sp->assocparams.sasoc_number_peer_destinations = 0;
	sp->assocparams.sasoc_peer_rwnd = 0;
	sp->assocparams.sasoc_local_rwnd = 0;
	sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;

	/* Initialize default event subscriptions. By default, all the
	 * options are off.
	 */
	sp->subscribe = 0;

	/* Default Peer Address Parameters.  These defaults can
	 * be modified via SCTP_PEER_ADDR_PARAMS
	 */
	sp->hbinterval  = net->sctp.hb_interval;
	sp->pathmaxrxt  = net->sctp.max_retrans_path;
	sp->pf_retrans  = net->sctp.pf_retrans;
	sp->pathmtu     = 0; /* allow default discovery */
	sp->sackdelay   = net->sctp.sack_timeout;
	sp->sackfreq    = 2;
	sp->param_flags = SPP_HB_ENABLE |
			  SPP_PMTUD_ENABLE |
			  SPP_SACKDELAY_ENABLE;
	sp->default_ss = SCTP_SS_DEFAULT;

	/* If enabled no SCTP message fragmentation will be performed.
	 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
	 */
	sp->disable_fragments = 0;

	/* Enable Nagle algorithm by default.  */
	sp->nodelay = 0;

	sp->recvrcvinfo = 0;
	sp->recvnxtinfo = 0;

	/* Enable by default. */
	sp->v4mapped = 1;

	/* Auto-close idle associations after the configured
	 * number of seconds.  A value of 0 disables this
	 * feature.  Configure through the SCTP_AUTOCLOSE socket option,
	 * for UDP-style sockets only.
	 */
	sp->autoclose = 0;

	/* User specified fragmentation limit. */
	sp->user_frag = 0;

	sp->adaptation_ind = 0;

	sp->pf = sctp_get_pf_specific(sk->sk_family);

	/* Control variables for partial data delivery. */
	atomic_set(&sp->pd_mode, 0);
	skb_queue_head_init(&sp->pd_lobby);
	sp->frag_interleave = 0;

	/* Create a per socket endpoint structure.  Even if we
	 * change the data structure relationships, this may still
	 * be useful for storing pre-connect address information.
	 */
	sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
	if (!sp->ep)
		return -ENOMEM;

	sp->hmac = NULL;

	sk->sk_destruct = sctp_destruct_sock;

	SCTP_DBG_OBJCNT_INC(sock);

	local_bh_disable();
	sk_sockets_allocated_inc(sk);
	sock_prot_inuse_add(net, sk->sk_prot, 1);

	/* Nothing can fail after this block, otherwise
	 * sctp_destroy_sock() will be called without addr_wq_lock held
	 */
	if (net->sctp.default_auto_asconf) {
		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
		list_add_tail(&sp->auto_asconf_list,
		    &net->sctp.auto_asconf_splist);
		sp->do_auto_asconf = 1;
		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
	} else {
		sp->do_auto_asconf = 0;
	}

	local_bh_enable();

	return 0;
}
/* Cleanup any SCTP per socket resources.  Must be called with
 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
 */
static void sctp_destroy_sock(struct sock *sk)
{
	struct sctp_sock *sp;

	pr_debug("%s: sk:%p\n", __func__, sk);

	/* Release our hold on the endpoint. */
	sp = sctp_sk(sk);
	/* This could happen during socket init, thus we bail out
	 * early, since the rest of the below is not setup either.
	 */
	if (sp->ep == NULL)
		return;

	if (sp->do_auto_asconf) {
		sp->do_auto_asconf = 0;
		list_del(&sp->auto_asconf_list);
	}
	sctp_endpoint_free(sp->ep);
	local_bh_disable();
	sk_sockets_allocated_dec(sk);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
}
/* Triggered when there are no references on the socket anymore */
static void sctp_destruct_sock(struct sock *sk)
{
	struct sctp_sock *sp = sctp_sk(sk);

	/* Free up the HMAC transform. */
	crypto_free_shash(sp->hmac);

	inet_sock_destruct(sk);
}
/* API 4.1.7 shutdown() - TCP Style Syntax
 *     int shutdown(int socket, int how);
 *
 *     sd      - the socket descriptor of the association to be closed.
 *     how     - Specifies the type of shutdown.  The values are
 *               as follows:
 *               SHUT_RD
 *                     Disables further receive operations. No SCTP
 *                     protocol action is taken.
 *               SHUT_WR
 *                     Disables further send operations, and initiates
 *                     the SCTP shutdown sequence.
 *               SHUT_RDWR
 *                     Disables further send and receive operations
 *                     and initiates the SCTP shutdown sequence.
 */
static void sctp_shutdown(struct sock *sk, int how)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;

	if (!sctp_style(sk, TCP))
		return;

	ep = sctp_sk(sk)->ep;
	if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
		struct sctp_association *asoc;

		inet_sk_set_state(sk, SCTP_SS_CLOSING);
		asoc = list_entry(ep->asocs.next,
				  struct sctp_association, asocs);
		sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}
}
int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
		       struct sctp_info *info)
{
	struct sctp_transport *prim;
	struct list_head *pos;
	int mask;

	memset(info, 0, sizeof(*info));
	if (!asoc) {
		struct sctp_sock *sp = sctp_sk(sk);

		info->sctpi_s_autoclose = sp->autoclose;
		info->sctpi_s_adaptation_ind = sp->adaptation_ind;
		info->sctpi_s_pd_point = sp->pd_point;
		info->sctpi_s_nodelay = sp->nodelay;
		info->sctpi_s_disable_fragments = sp->disable_fragments;
		info->sctpi_s_v4mapped = sp->v4mapped;
		info->sctpi_s_frag_interleave = sp->frag_interleave;
		info->sctpi_s_type = sp->type;

		return 0;
	}

	info->sctpi_tag = asoc->c.my_vtag;
	info->sctpi_state = asoc->state;
	info->sctpi_rwnd = asoc->a_rwnd;
	info->sctpi_unackdata = asoc->unack_data;
	info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
	info->sctpi_instrms = asoc->stream.incnt;
	info->sctpi_outstrms = asoc->stream.outcnt;
	list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
		info->sctpi_inqueue++;
	list_for_each(pos, &asoc->outqueue.out_chunk_list)
		info->sctpi_outqueue++;
	info->sctpi_overall_error = asoc->overall_error_count;
	info->sctpi_max_burst = asoc->max_burst;
	info->sctpi_maxseg = asoc->frag_point;
	info->sctpi_peer_rwnd = asoc->peer.rwnd;
	info->sctpi_peer_tag = asoc->c.peer_vtag;

	mask = asoc->peer.ecn_capable << 1;
	mask = (mask | asoc->peer.ipv4_address) << 1;
	mask = (mask | asoc->peer.ipv6_address) << 1;
	mask = (mask | asoc->peer.hostname_address) << 1;
	mask = (mask | asoc->peer.asconf_capable) << 1;
	mask = (mask | asoc->peer.prsctp_capable) << 1;
	mask = (mask | asoc->peer.auth_capable);
	info->sctpi_peer_capable = mask;
	mask = asoc->peer.sack_needed << 1;
	mask = (mask | asoc->peer.sack_generation) << 1;
	mask = (mask | asoc->peer.zero_window_announced);
	info->sctpi_peer_sack = mask;

	info->sctpi_isacks = asoc->stats.isacks;
	info->sctpi_osacks = asoc->stats.osacks;
	info->sctpi_opackets = asoc->stats.opackets;
	info->sctpi_ipackets = asoc->stats.ipackets;
	info->sctpi_rtxchunks = asoc->stats.rtxchunks;
	info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
	info->sctpi_idupchunks = asoc->stats.idupchunks;
	info->sctpi_gapcnt = asoc->stats.gapcnt;
	info->sctpi_ouodchunks = asoc->stats.ouodchunks;
	info->sctpi_iuodchunks = asoc->stats.iuodchunks;
	info->sctpi_oodchunks = asoc->stats.oodchunks;
	info->sctpi_iodchunks = asoc->stats.iodchunks;
	info->sctpi_octrlchunks = asoc->stats.octrlchunks;
	info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;

	prim = asoc->peer.primary_path;
	memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
	info->sctpi_p_state = prim->state;
	info->sctpi_p_cwnd = prim->cwnd;
	info->sctpi_p_srtt = prim->srtt;
	info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
	info->sctpi_p_hbinterval = prim->hbinterval;
	info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
	info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
	info->sctpi_p_ssthresh = prim->ssthresh;
	info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
	info->sctpi_p_flight_size = prim->flight_size;
	info->sctpi_p_error = prim->error_count;

	return 0;
}
EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
/* use callback to avoid exporting the core structure */
void sctp_transport_walk_start(struct rhashtable_iter *iter)
{
	rhltable_walk_enter(&sctp_transport_hashtable, iter);

	rhashtable_walk_start(iter);
}

void sctp_transport_walk_stop(struct rhashtable_iter *iter)
{
	rhashtable_walk_stop(iter);
	rhashtable_walk_exit(iter);
}
struct sctp_transport *sctp_transport_get_next(struct net *net,
					       struct rhashtable_iter *iter)
{
	struct sctp_transport *t;

	t = rhashtable_walk_next(iter);
	for (; t; t = rhashtable_walk_next(iter)) {
		if (IS_ERR(t)) {
			if (PTR_ERR(t) == -EAGAIN)
				continue;
			break;
		}

		if (!sctp_transport_hold(t))
			continue;

		if (net_eq(sock_net(t->asoc->base.sk), net) &&
		    t->asoc->peer.primary_path == t)
			break;

		sctp_transport_put(t);
	}

	return t;
}
struct sctp_transport *sctp_transport_get_idx(struct net *net,
					      struct rhashtable_iter *iter,
					      int pos)
{
	struct sctp_transport *t;

	if (!pos)
		return SEQ_START_TOKEN;

	while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
		if (!--pos)
			break;
		sctp_transport_put(t);
	}

	return t;
}
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
			   void *p) {
	int err = 0;
	int hash = 0;
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
	     hash++, head++) {
		read_lock_bh(&head->lock);
		sctp_for_each_hentry(epb, &head->chain) {
			err = cb(sctp_ep(epb), p);
			if (err)
				break;
		}
		read_unlock_bh(&head->lock);
	}

	return err;
}
EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
				  struct net *net,
				  const union sctp_addr *laddr,
				  const union sctp_addr *paddr, void *p)
{
	struct sctp_transport *transport;
	int err;

	rcu_read_lock();
	transport = sctp_addrs_lookup_transport(net, laddr, paddr);
	rcu_read_unlock();
	if (!transport)
		return -ENOENT;

	err = cb(transport, p);
	sctp_transport_put(transport);

	return err;
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
			    int (*cb_done)(struct sctp_transport *, void *),
			    struct net *net, int *pos, void *p) {
	struct rhashtable_iter hti;
	struct sctp_transport *tsp;
	int ret;

again:
	ret = 0;
	sctp_transport_walk_start(&hti);

	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
		ret = cb(tsp, p);
		if (ret)
			break;
		(*pos)++;
		sctp_transport_put(tsp);
	}
	sctp_transport_walk_stop(&hti);

	if (ret) {
		if (cb_done && !cb_done(tsp, p)) {
			(*pos)++;
			sctp_transport_put(tsp);
			goto again;
		}
		sctp_transport_put(tsp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sctp_for_each_transport);
/* 7.2.1 Association Status (SCTP_STATUS)
 *
 * Applications can retrieve current status information about an
 * association, including association state, peer receiver window size,
 * number of unacked data chunks, and number of data chunks pending
 * receipt.  This information is read-only.
 */
static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_status status;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	sctp_assoc_t associd;
	int retval = 0;

	if (len < sizeof(status)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(status);
	if (copy_from_user(&status, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	associd = status.sstat_assoc_id;
	asoc = sctp_id2assoc(sk, associd);
	if (!asoc) {
		retval = -EINVAL;
		goto out;
	}

	transport = asoc->peer.primary_path;

	status.sstat_assoc_id = sctp_assoc2id(asoc);
	status.sstat_state = sctp_assoc_to_state(asoc);
	status.sstat_rwnd =  asoc->peer.rwnd;
	status.sstat_unackdata = asoc->unack_data;

	status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
	status.sstat_instrms = asoc->stream.incnt;
	status.sstat_outstrms = asoc->stream.outcnt;
	status.sstat_fragmentation_point = asoc->frag_point;
	status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
	memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
			transport->af_specific->sockaddr_len);
	/* Map ipv4 address into v4-mapped-on-v6 address.  */
	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
		(union sctp_addr *)&status.sstat_primary.spinfo_address);
	status.sstat_primary.spinfo_state = transport->state;
	status.sstat_primary.spinfo_cwnd = transport->cwnd;
	status.sstat_primary.spinfo_srtt = transport->srtt;
	status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
	status.sstat_primary.spinfo_mtu = transport->pathmtu;

	if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
		status.sstat_primary.spinfo_state = SCTP_ACTIVE;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
		 __func__, len, status.sstat_state, status.sstat_rwnd,
		 status.sstat_assoc_id);

	if (copy_to_user(optval, &status, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}
/* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
 *
 * Applications can retrieve information about a specific peer address
 * of an association, including its reachability state, congestion
 * window, and retransmission timer values.  This information is
 * read-only.
 */
static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
					  char __user *optval,
					  int __user *optlen)
{
	struct sctp_paddrinfo pinfo;
	struct sctp_transport *transport;
	int retval = 0;

	if (len < sizeof(pinfo)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(pinfo);
	if (copy_from_user(&pinfo, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
					   pinfo.spinfo_assoc_id);
	if (!transport) {
		retval = -EINVAL;
		goto out;
	}

	pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
	pinfo.spinfo_state = transport->state;
	pinfo.spinfo_cwnd = transport->cwnd;
	pinfo.spinfo_srtt = transport->srtt;
	pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
	pinfo.spinfo_mtu = transport->pathmtu;

	if (pinfo.spinfo_state == SCTP_UNKNOWN)
		pinfo.spinfo_state = SCTP_ACTIVE;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, &pinfo, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is a on/off flag.  If enabled no SCTP message
 * fragmentation will be performed.  Instead if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * instead a error will be indicated to the user.
 */
static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->disable_fragments == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
 *
 * This socket option is used to specify various notifications and
 * ancillary data the user wishes to receive.
 */
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
				  int __user *optlen)
{
	struct sctp_event_subscribe subscribe;
	__u8 *sn_type = (__u8 *)&subscribe;
	int i;

	if (len == 0)
		return -EINVAL;
	if (len > sizeof(struct sctp_event_subscribe))
		len = sizeof(struct sctp_event_subscribe);
	if (put_user(len, optlen))
		return -EFAULT;

	for (i = 0; i < len; i++)
		sn_type[i] = sctp_ulpevent_type_enabled(sctp_sk(sk)->subscribe,
							SCTP_SN_TYPE_BASE + i);

	if (copy_to_user(optval, &subscribe, len))
		return -EFAULT;

	return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (len < sizeof(int))
		return -EINVAL;
	len = sizeof(int);
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(sctp_sk(sk)->autoclose, (int __user *)optval))
		return -EFAULT;
	return 0;
}
/* Helper routine to branch off an association to a new socket.  */
int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
	struct sctp_association *asoc = sctp_id2assoc(sk, id);
	struct sctp_sock *sp = sctp_sk(sk);
	struct socket *sock;
	int err = 0;

	/* Do not peel off from one netns to another one. */
	if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
		return -EINVAL;

	if (!asoc)
		return -EINVAL;

	/* An association cannot be branched off from an already peeled-off
	 * socket, nor is this supported for tcp style sockets.
	 */
	if (!sctp_style(sk, UDP))
		return -EINVAL;

	/* Create a new socket.  */
	err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
	if (err < 0)
		return err;

	sctp_copy_sock(sock->sk, sk, asoc);

	/* Make peeled-off sockets more like 1-1 accepted sockets.
	 * Set the daddr and initialize id to something more random and also
	 * copy over any ip options.
	 */
	sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
	sp->pf->copy_ip_options(sk, sock->sk);

	/* Populate the fields of the newsk from the oldsk and migrate the
	 * asoc to the newsk.
	 */
	err = sctp_sock_migrate(sk, sock->sk, asoc,
				SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
	if (err) {
		sock_release(sock);
		sock = NULL;
	}

	*sockp = sock;

	return err;
}
EXPORT_SYMBOL(sctp_do_peeloff);
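/* Example (illustrative only): a user-space sketch of branching one
 * association off a one-to-many socket, assuming the sctp_peeloff()
 * wrapper from lksctp-tools (it issues the SCTP_SOCKOPT_PEELOFF
 * getsockopt handled below); assoc_id would normally come from an
 * SCTP_ASSOC_CHANGE notification.
 *
 *	#include <netinet/sctp.h>
 *
 *	int one_to_one_sd = sctp_peeloff(sd, assoc_id);
 *	if (one_to_one_sd < 0)
 *		perror("sctp_peeloff");
 */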
static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
					  struct file **newfile, unsigned flags)
{
	struct socket *newsock;
	int retval;

	retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
	if (retval < 0)
		goto out;

	/* Map the socket to an unused fd that can be returned to the user.  */
	retval = get_unused_fd_flags(flags & SOCK_CLOEXEC);
	if (retval < 0) {
		sock_release(newsock);
		goto out;
	}

	*newfile = sock_alloc_file(newsock, 0, NULL);
	if (IS_ERR(*newfile)) {
		put_unused_fd(retval);
		retval = PTR_ERR(*newfile);
		*newfile = NULL;
		return retval;
	}

	pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
		 retval);

	peeloff->sd = retval;

	if (flags & SOCK_NONBLOCK)
		(*newfile)->f_flags |= O_NONBLOCK;
out:
	return retval;
}
static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval,
				   int __user *optlen)
{
	sctp_peeloff_arg_t peeloff;
	struct file *newfile = NULL;
	int retval = 0;

	if (len < sizeof(sctp_peeloff_arg_t))
		return -EINVAL;
	len = sizeof(sctp_peeloff_arg_t);
	if (copy_from_user(&peeloff, optval, len))
		return -EFAULT;

	retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
	if (retval < 0)
		goto out;

	/* Return the fd mapped to the new socket.  */
	if (put_user(len, optlen)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}

	if (copy_to_user(optval, &peeloff, len)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}
	fd_install(retval, newfile);
out:
	return retval;
}
static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
					 char __user *optval, int __user *optlen)
{
	sctp_peeloff_flags_arg_t peeloff;
	struct file *newfile = NULL;
	int retval = 0;

	if (len < sizeof(sctp_peeloff_flags_arg_t))
		return -EINVAL;
	len = sizeof(sctp_peeloff_flags_arg_t);
	if (copy_from_user(&peeloff, optval, len))
		return -EFAULT;

	retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
						&newfile, peeloff.flags);
	if (retval < 0)
		goto out;

	/* Return the fd mapped to the new socket.  */
	if (put_user(len, optlen)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}

	if (copy_to_user(optval, &peeloff, len)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}
	fd_install(retval, newfile);
out:
	return retval;
}
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 *     uint32_t                spp_ipv6_flowlabel;
 *     uint8_t                 spp_dscp;
 * };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a value of zero is present in
 *                     this field then no changes are to be made to this
 *                     parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of retransmissions
 *                     before this address shall be considered unreachable.
 *                     If a value of zero is present in this field then no
 *                     changes are to be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for.  This value will apply to all addresses of an
 *                     association if the spp_address field is empty.  Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *   spp_flags       - These flags are used to control various features
 *                     on an association.  The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE - Enable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled.  Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified.  Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.  Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive.  Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack.  The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address.
 *                     Note that if spp_address is empty then all addresses
 *                     will enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack.  If the spp_address field is blank
 *                     then delayed sack is disabled for the entire
 *                     association.  Note also that this field is mutually
 *                     exclusive to SPP_SACKDELAY_ENABLE, setting both will
 *                     have undefined results.
 *
 *                     SPP_IPV6_FLOWLABEL: Setting this flag enables the
 *                     setting of the IPV6 flow label value.  The value is
 *                     contained in the spp_ipv6_flowlabel field.
 *                     Upon retrieval, this flag will be set to indicate that
 *                     the spp_ipv6_flowlabel field has a valid value returned.
 *                     If a specific destination address is set (in the
 *                     spp_address field), then the value returned is that of
 *                     the address.  If just an association is specified (and
 *                     no address), then the association's default flow label
 *                     is returned.  If neither an association nor a destination
 *                     is specified, then the socket's default flow label is
 *                     returned.  For non-IPv6 sockets, this flag will be left
 *                     cleared.
 *
 *                     SPP_DSCP: Setting this flag enables the setting of the
 *                     Differentiated Services Code Point (DSCP) value
 *                     associated with either the association or a specific
 *                     address.  The value is obtained in the spp_dscp field.
 *                     Upon retrieval, this flag will be set to indicate that
 *                     the spp_dscp field has a valid value returned.  If a
 *                     specific destination address is set when called (in the
 *                     spp_address field), then that specific destination
 *                     address's DSCP value is returned.  If just an association
 *                     is specified, then the association's default DSCP is
 *                     returned.  If neither an association nor a destination is
 *                     specified, then the socket's default DSCP is returned.
 *
 *   spp_ipv6_flowlabel
 *                   - This field is used in conjunction with the
 *                     SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
 *                     The 20 least significant bits are used for the flow
 *                     label.  This setting has precedence over any IPv6-layer
 *                     setting.
 *
 *   spp_dscp        - This field is used in conjunction with the SPP_DSCP flag
 *                     and contains the DSCP.  The 6 most significant bits are
 *                     used for the DSCP.  This setting has precedence over any
 *                     IPv4- or IPv6- layer setting.
 */
static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
					    char __user *optval, int __user *optlen)
{
	struct sctp_paddrparams  params;
	struct sctp_transport   *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	if (len >= sizeof(params))
		len = sizeof(params);
	else if (len >= ALIGN(offsetof(struct sctp_paddrparams,
				       spp_ipv6_flowlabel), 4))
		len = ALIGN(offsetof(struct sctp_paddrparams,
				     spp_ipv6_flowlabel), 4);
	else
		return -EINVAL;

	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	/* If an address other than INADDR_ANY is specified, and
	 * no transport is found, then the request is invalid.
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans) {
			pr_debug("%s: failed no transport\n", __func__);
			return -EINVAL;
		}
	}

	/* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
	 * socket is a one to many style socket, and an association
	 * was not found, then the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP)) {
		pr_debug("%s: failed no association\n", __func__);
		return -EINVAL;
	}

	if (trans) {
		/* Fetch transport values. */
		params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
		params.spp_pathmtu    = trans->pathmtu;
		params.spp_pathmaxrxt = trans->pathmaxrxt;
		params.spp_sackdelay  = jiffies_to_msecs(trans->sackdelay);

		/*draft-11 doesn't say what to return in spp_flags*/
		params.spp_flags      = trans->param_flags;
		if (trans->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
			params.spp_ipv6_flowlabel = trans->flowlabel &
						    SCTP_FLOWLABEL_VAL_MASK;
			params.spp_flags |= SPP_IPV6_FLOWLABEL;
		}
		if (trans->dscp & SCTP_DSCP_SET_MASK) {
			params.spp_dscp = trans->dscp & SCTP_DSCP_VAL_MASK;
			params.spp_flags |= SPP_DSCP;
		}
	} else if (asoc) {
		/* Fetch association values. */
		params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
		params.spp_pathmtu    = asoc->pathmtu;
		params.spp_pathmaxrxt = asoc->pathmaxrxt;
		params.spp_sackdelay  = jiffies_to_msecs(asoc->sackdelay);

		/*draft-11 doesn't say what to return in spp_flags*/
		params.spp_flags      = asoc->param_flags;
		if (asoc->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
			params.spp_ipv6_flowlabel = asoc->flowlabel &
						    SCTP_FLOWLABEL_VAL_MASK;
			params.spp_flags |= SPP_IPV6_FLOWLABEL;
		}
		if (asoc->dscp & SCTP_DSCP_SET_MASK) {
			params.spp_dscp = asoc->dscp & SCTP_DSCP_VAL_MASK;
			params.spp_flags |= SPP_DSCP;
		}
	} else {
		/* Fetch socket values. */
		params.spp_hbinterval = sp->hbinterval;
		params.spp_pathmtu    = sp->pathmtu;
		params.spp_sackdelay  = sp->sackdelay;
		params.spp_pathmaxrxt = sp->pathmaxrxt;

		/*draft-11 doesn't say what to return in spp_flags*/
		params.spp_flags      = sp->param_flags;
		if (sp->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
			params.spp_ipv6_flowlabel = sp->flowlabel &
						    SCTP_FLOWLABEL_VAL_MASK;
			params.spp_flags |= SPP_IPV6_FLOWLABEL;
		}
		if (sp->dscp & SCTP_DSCP_SET_MASK) {
			params.spp_dscp = sp->dscp & SCTP_DSCP_VAL_MASK;
			params.spp_flags |= SPP_DSCP;
		}
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}
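/* Example (illustrative only): a user-space sketch of querying the
 * heartbeat settings of one peer address, assuming 'peer' holds that
 * peer's sockaddr; spp_address and spp_assoc_id select what is fetched
 * by the handler above.
 *
 *	struct sctp_paddrparams pp = { 0 };
 *	socklen_t len = sizeof(pp);
 *
 *	pp.spp_assoc_id = assoc_id;
 *	memcpy(&pp.spp_address, &peer, sizeof(peer));
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		       &pp, &len) == 0)
 *		printf("hbinterval %u ms, pathmaxrxt %u\n",
 *		       pp.spp_hbinterval, pp.spp_pathmaxrxt);
 */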
/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will effect the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.  If
 * the assoc_id is 0, then this sets or gets the endpoints default
 * values.  If the assoc_id field is non-zero, then the set or get
 * effects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t            sack_assoc_id;
 *     uint32_t                sack_delay;
 *     uint32_t                sack_freq;
 * };
 *
 * sack_assoc_id -  This parameter, indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoints default value is changed (effecting future
 *    associations only).
 *
 * sack_delay -  This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq -  This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2, setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_sack_info    params;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	if (len >= sizeof(struct sctp_sack_info)) {
		len = sizeof(struct sctp_sack_info);

		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else if (len == sizeof(struct sctp_assoc_value)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
				    "Use struct sctp_sack_info instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	/* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
	 * socket is a one to many style socket, and an association
	 * was not found, then the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
	if (!asoc && params.sack_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		/* Fetch association values. */
		if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay = jiffies_to_msecs(asoc->sackdelay);
			params.sack_freq = asoc->sackfreq;
		} else {
			params.sack_delay = 0;
			params.sack_freq = 1;
		}
	} else {
		/* Fetch socket values. */
		if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay = sp->sackdelay;
			params.sack_freq = sp->sackfreq;
		} else {
			params.sack_delay = 0;
			params.sack_freq = 1;
		}
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}
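/* Example (illustrative only): a user-space sketch of reading the delayed
 * SACK settings with the non-deprecated struct sctp_sack_info layout,
 * assuming headers that define SCTP_FUTURE_ASSOC for the endpoint
 * defaults on a one-to-many socket.
 *
 *	struct sctp_sack_info si = { .sack_assoc_id = SCTP_FUTURE_ASSOC };
 *	socklen_t len = sizeof(si);
 *
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, &len) == 0)
 *		printf("delay %u ms, freq %u\n", si.sack_delay, si.sack_freq);
 */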
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are affected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	if (len < sizeof(struct sctp_initmsg))
		return -EINVAL;
	len = sizeof(struct sctp_initmsg);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
		return -EFAULT;
	return 0;
}
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_transport *from;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	size_t space_left;
	int bytes_copied;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/* For UDP-style sockets, id specifies the association to query.  */
	asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
	if (!asoc)
		return -EINVAL;

	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		memcpy(&temp, &from->ipaddr, sizeof(temp));
		addrlen = sctp_get_pf_specific(sk->sk_family)
			      ->addr_to_user(sp, &temp);
		if (space_left < addrlen)
			return -ENOMEM;
		if (copy_to_user(to, &temp, addrlen))
			return -EFAULT;
		to += addrlen;
		cnt++;
		space_left -= addrlen;
	}

	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
		return -EFAULT;
	bytes_copied = ((char __user *)to) - optval;
	if (put_user(bytes_copied, optlen))
		return -EFAULT;

	return 0;
}
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
			    size_t space_left, int *bytes_copied)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr temp;
	int cnt = 0;
	int addrlen;
	struct net *net = sock_net(sk);

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
		if (!addr->valid)
			continue;

		if ((PF_INET == sk->sk_family) &&
		    (AF_INET6 == addr->a.sa.sa_family))
			continue;
		if ((PF_INET6 == sk->sk_family) &&
		    inet_v6_ipv6only(sk) &&
		    (AF_INET == addr->a.sa.sa_family))
			continue;
		memcpy(&temp, &addr->a, sizeof(temp));
		if (!temp.v4.sin_port)
			temp.v4.sin_port = htons(port);

		addrlen = sctp_get_pf_specific(sk->sk_family)
			      ->addr_to_user(sctp_sk(sk), &temp);

		if (space_left < addrlen) {
			cnt = -ENOMEM;
			break;
		}
		memcpy(to, &temp, addrlen);

		to += addrlen;
		cnt++;
		space_left -= addrlen;
		*bytes_copied += addrlen;
	}
	rcu_read_unlock();

	return cnt;
}
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	struct sctp_bind_addr *bp;
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_sockaddr_entry *addr;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	int err = 0;
	size_t space_left;
	int bytes_copied = 0;
	void *addrs;
	void *buf;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/*
	 *  For UDP-style sockets, id specifies the association to query.
	 *  If the id field is set to the value '0' then the locally bound
	 *  addresses are returned without regard to any particular
	 *  association.
	 */
	if (0 == getaddrs.assoc_id) {
		bp = &sctp_sk(sk)->ep->base.bind_addr;
	} else {
		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
		if (!asoc)
			return -EINVAL;
		bp = &asoc->base.bind_addr;
	}

	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
	if (!addrs)
		return -ENOMEM;

	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
	 * addresses from the global local address list.
	 */
	if (sctp_list_single_entry(&bp->address_list)) {
		addr = list_entry(bp->address_list.next,
				  struct sctp_sockaddr_entry, list);
		if (sctp_is_any(sk, &addr->a)) {
			cnt = sctp_copy_laddrs(sk, bp->port, addrs,
						space_left, &bytes_copied);
			if (cnt < 0) {
				err = cnt;
				goto out;
			}
			goto copy_getaddrs;
		}
	}

	buf = addrs;
	/* Protection on the bound address list is not needed since
	 * in the socket option context we hold a socket lock and
	 * thus the bound address list can't change.
	 */
	list_for_each_entry(addr, &bp->address_list, list) {
		memcpy(&temp, &addr->a, sizeof(temp));
		addrlen = sctp_get_pf_specific(sk->sk_family)
			      ->addr_to_user(sp, &temp);
		if (space_left < addrlen) {
			err = -ENOMEM; /*fixme: right error?*/
			goto out;
		}
		memcpy(buf, &temp, addrlen);
		buf += addrlen;
		bytes_copied += addrlen;
		cnt++;
		space_left -= addrlen;
	}

copy_getaddrs:
	if (copy_to_user(to, addrs, bytes_copied)) {
		err = -EFAULT;
		goto out;
	}
	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
		err = -EFAULT;
		goto out;
	}
	/* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
	 * but we can't change it anymore.
	 */
	if (put_user(bytes_copied, optlen))
		err = -EFAULT;
out:
	kfree(addrs);
	return err;
}
/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
 *
 * Requests that the local SCTP stack use the enclosed peer address as
 * the association primary.  The enclosed address must be one of the
 * association peer's addresses.
 */
static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	struct sctp_prim prim;
	struct sctp_association *asoc;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_prim))
		return -EINVAL;

	len = sizeof(struct sctp_prim);

	if (copy_from_user(&prim, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.primary_path)
		return -ENOTCONN;

	memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
		asoc->peer.primary_path->af_specific->sockaddr_len);

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
			(union sctp_addr *)&prim.ssp_addr);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &prim, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.11  Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
 *
 * Requests that the local endpoint set the specified Adaptation Layer
 * Indication parameter for all future INIT and INIT-ACK exchanges.
 */
static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
					    char __user *optval, int __user *optlen)
{
	struct sctp_setadaptation adaptation;

	if (len < sizeof(struct sctp_setadaptation))
		return -EINVAL;

	len = sizeof(struct sctp_setadaptation);

	adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &adaptation, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 *   Applications that wish to use the sendto() system call may wish to
 *   specify a default set of parameters that would normally be supplied
 *   through the inclusion of ancillary data.  This socket option allows
 *   such an application to set the default sctp_sndrcvinfo structure.
 *
 *   The application that wishes to use this socket option simply passes
 *   in to this call the sctp_sndrcvinfo structure defined in Section
 *   5.2.2)  The input parameters accepted by this call include
 *   sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 *   sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
 *   to this call if the caller is using the UDP model.
 *
 *   For getsockopt, it get the default sctp_sndrcvinfo structure.
 */
static int sctp_getsockopt_default_send_param(struct sock *sk,
					      int len, char __user *optval,
					      int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndrcvinfo info;

	if (len < sizeof(info))
		return -EINVAL;

	len = sizeof(info);

	if (copy_from_user(&info, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		info.sinfo_stream = asoc->default_stream;
		info.sinfo_flags = asoc->default_flags;
		info.sinfo_ppid = asoc->default_ppid;
		info.sinfo_context = asoc->default_context;
		info.sinfo_timetolive = asoc->default_timetolive;
	} else {
		info.sinfo_stream = sp->default_stream;
		info.sinfo_flags = sp->default_flags;
		info.sinfo_ppid = sp->default_ppid;
		info.sinfo_context = sp->default_context;
		info.sinfo_timetolive = sp->default_timetolive;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &info, len))
		return -EFAULT;

	return 0;
}
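/* Illustrative userspace sketch (not part of this kernel file): reading the
 * defaults that sendto()/sendmsg() will use when no ancillary data is
 * supplied.  "fd" and "assoc_id" are assumptions for the example; on a
 * one-to-many (UDP-style) socket the association id selects whose defaults
 * are returned, and SCTP_FUTURE_ASSOC returns the socket-wide defaults.
 *
 *	struct sctp_sndrcvinfo info;
 *	socklen_t optlen = sizeof(info);
 *
 *	memset(&info, 0, sizeof(info));
 *	info.sinfo_assoc_id = assoc_id;
 *	if (getsockopt(fd, SOL_SCTP, SCTP_DEFAULT_SEND_PARAM, &info, &optlen) == 0)
 *		printf("default stream %u, ppid %u\n",
 *		       info.sinfo_stream, ntohl(info.sinfo_ppid));
 */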
6453 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
6454 * (SCTP_DEFAULT_SNDINFO)
6456 static int sctp_getsockopt_default_sndinfo(struct sock
*sk
, int len
,
6457 char __user
*optval
,
6460 struct sctp_sock
*sp
= sctp_sk(sk
);
6461 struct sctp_association
*asoc
;
6462 struct sctp_sndinfo info
;
6464 if (len
< sizeof(info
))
6469 if (copy_from_user(&info
, optval
, len
))
6472 asoc
= sctp_id2assoc(sk
, info
.snd_assoc_id
);
6473 if (!asoc
&& info
.snd_assoc_id
!= SCTP_FUTURE_ASSOC
&&
6474 sctp_style(sk
, UDP
))
6478 info
.snd_sid
= asoc
->default_stream
;
6479 info
.snd_flags
= asoc
->default_flags
;
6480 info
.snd_ppid
= asoc
->default_ppid
;
6481 info
.snd_context
= asoc
->default_context
;
6483 info
.snd_sid
= sp
->default_stream
;
6484 info
.snd_flags
= sp
->default_flags
;
6485 info
.snd_ppid
= sp
->default_ppid
;
6486 info
.snd_context
= sp
->default_context
;
6489 if (put_user(len
, optlen
))
6491 if (copy_to_user(optval
, &info
, len
))
/*
 * 7.1.5 SCTP_NODELAY
 *
 * Turn on/off any Nagle-like algorithm.  This means that packets are
 * generally sent as soon as possible and no unnecessary delays are
 * introduced, at the cost of more packets in the network.  Expects an
 * integer boolean flag.
 */
static int sctp_getsockopt_nodelay(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->nodelay == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
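/* Illustrative userspace sketch (not part of this kernel file): the option
 * value is a plain integer boolean; a non-zero value disables the Nagle-like
 * delay.  "fd" is an assumed SCTP socket descriptor.
 *
 *	int on = 1;
 *	socklen_t optlen = sizeof(on);
 *
 *	setsockopt(fd, SOL_SCTP, SCTP_NODELAY, &on, sizeof(on));
 *	getsockopt(fd, SOL_SCTP, SCTP_NODELAY, &on, &optlen);
 */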
6526 * 7.1.1 SCTP_RTOINFO
6528 * The protocol parameters used to initialize and bound retransmission
6529 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
6530 * and modify these parameters.
6531 * All parameters are time values, in milliseconds. A value of 0, when
6532 * modifying the parameters, indicates that the current value should not
6536 static int sctp_getsockopt_rtoinfo(struct sock
*sk
, int len
,
6537 char __user
*optval
,
6538 int __user
*optlen
) {
6539 struct sctp_rtoinfo rtoinfo
;
6540 struct sctp_association
*asoc
;
6542 if (len
< sizeof (struct sctp_rtoinfo
))
6545 len
= sizeof(struct sctp_rtoinfo
);
6547 if (copy_from_user(&rtoinfo
, optval
, len
))
6550 asoc
= sctp_id2assoc(sk
, rtoinfo
.srto_assoc_id
);
6552 if (!asoc
&& rtoinfo
.srto_assoc_id
!= SCTP_FUTURE_ASSOC
&&
6553 sctp_style(sk
, UDP
))
6556 /* Values corresponding to the specific association. */
6558 rtoinfo
.srto_initial
= jiffies_to_msecs(asoc
->rto_initial
);
6559 rtoinfo
.srto_max
= jiffies_to_msecs(asoc
->rto_max
);
6560 rtoinfo
.srto_min
= jiffies_to_msecs(asoc
->rto_min
);
6562 /* Values corresponding to the endpoint. */
6563 struct sctp_sock
*sp
= sctp_sk(sk
);
6565 rtoinfo
.srto_initial
= sp
->rtoinfo
.srto_initial
;
6566 rtoinfo
.srto_max
= sp
->rtoinfo
.srto_max
;
6567 rtoinfo
.srto_min
= sp
->rtoinfo
.srto_min
;
6570 if (put_user(len
, optlen
))
6573 if (copy_to_user(optval
, &rtoinfo
, len
))
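/* Illustrative userspace sketch (not part of this kernel file): reading the
 * RTO bounds, all in milliseconds.  "fd" and "assoc_id" are assumptions for
 * the example; SCTP_FUTURE_ASSOC returns the endpoint defaults.
 *
 *	struct sctp_rtoinfo rto;
 *	socklen_t optlen = sizeof(rto);
 *
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_assoc_id = assoc_id;
 *	if (getsockopt(fd, SOL_SCTP, SCTP_RTOINFO, &rto, &optlen) == 0)
 *		printf("rto initial %u, min %u, max %u ms\n",
 *		       rto.srto_initial, rto.srto_min, rto.srto_max);
 */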
6581 * 7.1.2 SCTP_ASSOCINFO
6583 * This option is used to tune the maximum retransmission attempts
6584 * of the association.
6585 * Returns an error if the new association retransmission value is
6586 * greater than the sum of the retransmission value of the peer.
6587 * See [SCTP] for more information.
6590 static int sctp_getsockopt_associnfo(struct sock
*sk
, int len
,
6591 char __user
*optval
,
6595 struct sctp_assocparams assocparams
;
6596 struct sctp_association
*asoc
;
6597 struct list_head
*pos
;
6600 if (len
< sizeof (struct sctp_assocparams
))
6603 len
= sizeof(struct sctp_assocparams
);
6605 if (copy_from_user(&assocparams
, optval
, len
))
6608 asoc
= sctp_id2assoc(sk
, assocparams
.sasoc_assoc_id
);
6610 if (!asoc
&& assocparams
.sasoc_assoc_id
!= SCTP_FUTURE_ASSOC
&&
6611 sctp_style(sk
, UDP
))
6614 /* Values correspoinding to the specific association */
6616 assocparams
.sasoc_asocmaxrxt
= asoc
->max_retrans
;
6617 assocparams
.sasoc_peer_rwnd
= asoc
->peer
.rwnd
;
6618 assocparams
.sasoc_local_rwnd
= asoc
->a_rwnd
;
6619 assocparams
.sasoc_cookie_life
= ktime_to_ms(asoc
->cookie_life
);
6621 list_for_each(pos
, &asoc
->peer
.transport_addr_list
) {
6625 assocparams
.sasoc_number_peer_destinations
= cnt
;
6627 /* Values corresponding to the endpoint */
6628 struct sctp_sock
*sp
= sctp_sk(sk
);
6630 assocparams
.sasoc_asocmaxrxt
= sp
->assocparams
.sasoc_asocmaxrxt
;
6631 assocparams
.sasoc_peer_rwnd
= sp
->assocparams
.sasoc_peer_rwnd
;
6632 assocparams
.sasoc_local_rwnd
= sp
->assocparams
.sasoc_local_rwnd
;
6633 assocparams
.sasoc_cookie_life
=
6634 sp
->assocparams
.sasoc_cookie_life
;
6635 assocparams
.sasoc_number_peer_destinations
=
6637 sasoc_number_peer_destinations
;
6640 if (put_user(len
, optlen
))
6643 if (copy_to_user(optval
, &assocparams
, len
))
/*
 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
 *
 * This socket option is a boolean flag which turns on or off mapped V4
 * addresses.  If this option is turned on and the socket is type
 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
 * If this option is turned off, then no mapping will be done of V4
 * addresses and a user will receive both PF_INET6 and PF_INET type
 * addresses on the socket.
 */
static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = sp->v4mapped;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
6679 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
6680 * (chapter and verse is quoted at sctp_setsockopt_context())
6682 static int sctp_getsockopt_context(struct sock
*sk
, int len
,
6683 char __user
*optval
, int __user
*optlen
)
6685 struct sctp_assoc_value params
;
6686 struct sctp_association
*asoc
;
6688 if (len
< sizeof(struct sctp_assoc_value
))
6691 len
= sizeof(struct sctp_assoc_value
);
6693 if (copy_from_user(¶ms
, optval
, len
))
6696 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
6697 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
6698 sctp_style(sk
, UDP
))
6701 params
.assoc_value
= asoc
? asoc
->default_rcv_context
6702 : sctp_sk(sk
)->default_rcv_context
;
6704 if (put_user(len
, optlen
))
6706 if (copy_to_user(optval
, ¶ms
, len
))
6713 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
6714 * This option will get or set the maximum size to put in any outgoing
6715 * SCTP DATA chunk. If a message is larger than this size it will be
6716 * fragmented by SCTP into the specified size. Note that the underlying
6717 * SCTP implementation may fragment into smaller sized chunks when the
6718 * PMTU of the underlying association is smaller than the value set by
6719 * the user. The default value for this option is '0' which indicates
6720 * the user is NOT limiting fragmentation and only the PMTU will effect
6721 * SCTP's choice of DATA chunk size. Note also that values set larger
6722 * than the maximum size of an IP datagram will effectively let SCTP
6723 * control fragmentation (i.e. the same as setting this option to 0).
6725 * The following structure is used to access and modify this parameter:
6727 * struct sctp_assoc_value {
6728 * sctp_assoc_t assoc_id;
6729 * uint32_t assoc_value;
6732 * assoc_id: This parameter is ignored for one-to-one style sockets.
6733 * For one-to-many style sockets this parameter indicates which
6734 * association the user is performing an action upon. Note that if
6735 * this field's value is zero then the endpoints default value is
6736 * changed (effecting future associations only).
6737 * assoc_value: This parameter specifies the maximum size in bytes.
6739 static int sctp_getsockopt_maxseg(struct sock
*sk
, int len
,
6740 char __user
*optval
, int __user
*optlen
)
6742 struct sctp_assoc_value params
;
6743 struct sctp_association
*asoc
;
6745 if (len
== sizeof(int)) {
6746 pr_warn_ratelimited(DEPRECATED
6748 "Use of int in maxseg socket option.\n"
6749 "Use struct sctp_assoc_value instead\n",
6750 current
->comm
, task_pid_nr(current
));
6751 params
.assoc_id
= SCTP_FUTURE_ASSOC
;
6752 } else if (len
>= sizeof(struct sctp_assoc_value
)) {
6753 len
= sizeof(struct sctp_assoc_value
);
6754 if (copy_from_user(¶ms
, optval
, len
))
6759 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
6760 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
6761 sctp_style(sk
, UDP
))
6765 params
.assoc_value
= asoc
->frag_point
;
6767 params
.assoc_value
= sctp_sk(sk
)->user_frag
;
6769 if (put_user(len
, optlen
))
6771 if (len
== sizeof(int)) {
6772 if (copy_to_user(optval
, ¶ms
.assoc_value
, len
))
6775 if (copy_to_user(optval
, ¶ms
, len
))
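/* Illustrative userspace sketch (not part of this kernel file): querying the
 * maximum fragmentation size with the struct form (the bare-int form is
 * deprecated, per the warning above).  "fd" is an assumed SCTP socket
 * descriptor.
 *
 *	struct sctp_assoc_value av;
 *	socklen_t optlen = sizeof(av);
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = SCTP_FUTURE_ASSOC;
 *	if (getsockopt(fd, SOL_SCTP, SCTP_MAXSEG, &av, &optlen) == 0)
 *		printf("maxseg: %u bytes (0 means only the PMTU limits DATA size)\n",
 *		       av.assoc_value);
 */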
/*
 * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
 */
static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
					       char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);

	val = sctp_sk(sk)->frag_interleave;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
6806 * 7.1.25. Set or Get the sctp partial delivery point
6807 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
6809 static int sctp_getsockopt_partial_delivery_point(struct sock
*sk
, int len
,
6810 char __user
*optval
,
6815 if (len
< sizeof(u32
))
6820 val
= sctp_sk(sk
)->pd_point
;
6821 if (put_user(len
, optlen
))
6823 if (copy_to_user(optval
, &val
, len
))
6830 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
6831 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
6833 static int sctp_getsockopt_maxburst(struct sock
*sk
, int len
,
6834 char __user
*optval
,
6837 struct sctp_assoc_value params
;
6838 struct sctp_association
*asoc
;
6840 if (len
== sizeof(int)) {
6841 pr_warn_ratelimited(DEPRECATED
6843 "Use of int in max_burst socket option.\n"
6844 "Use struct sctp_assoc_value instead\n",
6845 current
->comm
, task_pid_nr(current
));
6846 params
.assoc_id
= SCTP_FUTURE_ASSOC
;
6847 } else if (len
>= sizeof(struct sctp_assoc_value
)) {
6848 len
= sizeof(struct sctp_assoc_value
);
6849 if (copy_from_user(¶ms
, optval
, len
))
6854 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
6855 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
6856 sctp_style(sk
, UDP
))
6859 params
.assoc_value
= asoc
? asoc
->max_burst
: sctp_sk(sk
)->max_burst
;
6861 if (len
== sizeof(int)) {
6862 if (copy_to_user(optval
, ¶ms
.assoc_value
, len
))
6865 if (copy_to_user(optval
, ¶ms
, len
))
6873 static int sctp_getsockopt_hmac_ident(struct sock
*sk
, int len
,
6874 char __user
*optval
, int __user
*optlen
)
6876 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
6877 struct sctp_hmacalgo __user
*p
= (void __user
*)optval
;
6878 struct sctp_hmac_algo_param
*hmacs
;
6883 if (!ep
->auth_enable
)
6886 hmacs
= ep
->auth_hmacs_list
;
6887 data_len
= ntohs(hmacs
->param_hdr
.length
) -
6888 sizeof(struct sctp_paramhdr
);
6890 if (len
< sizeof(struct sctp_hmacalgo
) + data_len
)
6893 len
= sizeof(struct sctp_hmacalgo
) + data_len
;
6894 num_idents
= data_len
/ sizeof(u16
);
6896 if (put_user(len
, optlen
))
6898 if (put_user(num_idents
, &p
->shmac_num_idents
))
6900 for (i
= 0; i
< num_idents
; i
++) {
6901 __u16 hmacid
= ntohs(hmacs
->hmac_ids
[i
]);
6903 if (copy_to_user(&p
->shmac_idents
[i
], &hmacid
, sizeof(__u16
)))
6909 static int sctp_getsockopt_active_key(struct sock
*sk
, int len
,
6910 char __user
*optval
, int __user
*optlen
)
6912 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
6913 struct sctp_authkeyid val
;
6914 struct sctp_association
*asoc
;
6916 if (!ep
->auth_enable
)
6919 if (len
< sizeof(struct sctp_authkeyid
))
6922 len
= sizeof(struct sctp_authkeyid
);
6923 if (copy_from_user(&val
, optval
, len
))
6926 asoc
= sctp_id2assoc(sk
, val
.scact_assoc_id
);
6927 if (!asoc
&& val
.scact_assoc_id
&& sctp_style(sk
, UDP
))
6931 val
.scact_keynumber
= asoc
->active_key_id
;
6933 val
.scact_keynumber
= ep
->active_key_id
;
6935 if (put_user(len
, optlen
))
6937 if (copy_to_user(optval
, &val
, len
))
6943 static int sctp_getsockopt_peer_auth_chunks(struct sock
*sk
, int len
,
6944 char __user
*optval
, int __user
*optlen
)
6946 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
6947 struct sctp_authchunks __user
*p
= (void __user
*)optval
;
6948 struct sctp_authchunks val
;
6949 struct sctp_association
*asoc
;
6950 struct sctp_chunks_param
*ch
;
6954 if (!ep
->auth_enable
)
6957 if (len
< sizeof(struct sctp_authchunks
))
6960 if (copy_from_user(&val
, optval
, sizeof(val
)))
6963 to
= p
->gauth_chunks
;
6964 asoc
= sctp_id2assoc(sk
, val
.gauth_assoc_id
);
6968 ch
= asoc
->peer
.peer_chunks
;
6972 /* See if the user provided enough room for all the data */
6973 num_chunks
= ntohs(ch
->param_hdr
.length
) - sizeof(struct sctp_paramhdr
);
6974 if (len
< num_chunks
)
6977 if (copy_to_user(to
, ch
->chunks
, num_chunks
))
6980 len
= sizeof(struct sctp_authchunks
) + num_chunks
;
6981 if (put_user(len
, optlen
))
6983 if (put_user(num_chunks
, &p
->gauth_number_of_chunks
))
6988 static int sctp_getsockopt_local_auth_chunks(struct sock
*sk
, int len
,
6989 char __user
*optval
, int __user
*optlen
)
6991 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
6992 struct sctp_authchunks __user
*p
= (void __user
*)optval
;
6993 struct sctp_authchunks val
;
6994 struct sctp_association
*asoc
;
6995 struct sctp_chunks_param
*ch
;
6999 if (!ep
->auth_enable
)
7002 if (len
< sizeof(struct sctp_authchunks
))
7005 if (copy_from_user(&val
, optval
, sizeof(val
)))
7008 to
= p
->gauth_chunks
;
7009 asoc
= sctp_id2assoc(sk
, val
.gauth_assoc_id
);
7010 if (!asoc
&& val
.gauth_assoc_id
!= SCTP_FUTURE_ASSOC
&&
7011 sctp_style(sk
, UDP
))
7014 ch
= asoc
? (struct sctp_chunks_param
*)asoc
->c
.auth_chunks
7015 : ep
->auth_chunk_list
;
7019 num_chunks
= ntohs(ch
->param_hdr
.length
) - sizeof(struct sctp_paramhdr
);
7020 if (len
< sizeof(struct sctp_authchunks
) + num_chunks
)
7023 if (copy_to_user(to
, ch
->chunks
, num_chunks
))
7026 len
= sizeof(struct sctp_authchunks
) + num_chunks
;
7027 if (put_user(len
, optlen
))
7029 if (put_user(num_chunks
, &p
->gauth_number_of_chunks
))
/*
 * 8.2.5.  Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
 *
 * This option gets the current number of associations that are attached
 * to a one-to-many style socket.  The option value is a uint32_t.
 */
static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	u32 val = 0;

	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		val++;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
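/* Illustrative userspace sketch (not part of this kernel file): counting the
 * associations on a one-to-many socket.  "fd" is an assumed one-to-many
 * (SOCK_SEQPACKET) SCTP socket; on a TCP-style socket this option fails.
 *
 *	uint32_t assoc_num = 0;
 *	socklen_t optlen = sizeof(assoc_num);
 *
 *	if (getsockopt(fd, SOL_SCTP, SCTP_GET_ASSOC_NUMBER, &assoc_num, &optlen) == 0)
 *		printf("%u associations attached\n", assoc_num);
 */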
/*
 * 8.1.23 SCTP_AUTO_ASCONF
 * See the corresponding setsockopt entry as description
 */
static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
7090 * 8.2.6. Get the Current Identifiers of Associations
7091 * (SCTP_GET_ASSOC_ID_LIST)
7093 * This option gets the current list of SCTP association identifiers of
7094 * the SCTP associations handled by a one-to-many style socket.
7096 static int sctp_getsockopt_assoc_ids(struct sock
*sk
, int len
,
7097 char __user
*optval
, int __user
*optlen
)
7099 struct sctp_sock
*sp
= sctp_sk(sk
);
7100 struct sctp_association
*asoc
;
7101 struct sctp_assoc_ids
*ids
;
7104 if (sctp_style(sk
, TCP
))
7107 if (len
< sizeof(struct sctp_assoc_ids
))
7110 list_for_each_entry(asoc
, &(sp
->ep
->asocs
), asocs
) {
7114 if (len
< sizeof(struct sctp_assoc_ids
) + sizeof(sctp_assoc_t
) * num
)
7117 len
= sizeof(struct sctp_assoc_ids
) + sizeof(sctp_assoc_t
) * num
;
7119 ids
= kmalloc(len
, GFP_USER
| __GFP_NOWARN
);
7123 ids
->gaids_number_of_ids
= num
;
7125 list_for_each_entry(asoc
, &(sp
->ep
->asocs
), asocs
) {
7126 ids
->gaids_assoc_id
[num
++] = asoc
->assoc_id
;
7129 if (put_user(len
, optlen
) || copy_to_user(optval
, ids
, len
)) {
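/* Illustrative userspace sketch (not part of this kernel file): fetching the
 * association id list.  The caller must size the buffer for the ids it
 * expects; "fd" and the guessed maximum of 64 ids are assumptions for the
 * example.
 *
 *	size_t max = 64;
 *	socklen_t optlen = sizeof(struct sctp_assoc_ids) +
 *			   max * sizeof(sctp_assoc_t);
 *	struct sctp_assoc_ids *ids = malloc(optlen);
 *	uint32_t i;
 *
 *	if (ids &&
 *	    getsockopt(fd, SOL_SCTP, SCTP_GET_ASSOC_ID_LIST, ids, &optlen) == 0)
 *		for (i = 0; i < ids->gaids_number_of_ids; i++)
 *			printf("assoc id %d\n", ids->gaids_assoc_id[i]);
 *	free(ids);
 */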
7139 * SCTP_PEER_ADDR_THLDS
7141 * This option allows us to fetch the partially failed threshold for one or all
7142 * transports in an association. See Section 6.1 of:
7143 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
7145 static int sctp_getsockopt_paddr_thresholds(struct sock
*sk
,
7146 char __user
*optval
,
7150 struct sctp_paddrthlds val
;
7151 struct sctp_transport
*trans
;
7152 struct sctp_association
*asoc
;
7154 if (len
< sizeof(struct sctp_paddrthlds
))
7156 len
= sizeof(struct sctp_paddrthlds
);
7157 if (copy_from_user(&val
, (struct sctp_paddrthlds __user
*)optval
, len
))
7160 if (!sctp_is_any(sk
, (const union sctp_addr
*)&val
.spt_address
)) {
7161 trans
= sctp_addr_id2transport(sk
, &val
.spt_address
,
7166 val
.spt_pathmaxrxt
= trans
->pathmaxrxt
;
7167 val
.spt_pathpfthld
= trans
->pf_retrans
;
7172 asoc
= sctp_id2assoc(sk
, val
.spt_assoc_id
);
7173 if (!asoc
&& val
.spt_assoc_id
!= SCTP_FUTURE_ASSOC
&&
7174 sctp_style(sk
, UDP
))
7178 val
.spt_pathpfthld
= asoc
->pf_retrans
;
7179 val
.spt_pathmaxrxt
= asoc
->pathmaxrxt
;
7181 struct sctp_sock
*sp
= sctp_sk(sk
);
7183 val
.spt_pathpfthld
= sp
->pf_retrans
;
7184 val
.spt_pathmaxrxt
= sp
->pathmaxrxt
;
7187 if (put_user(len
, optlen
) || copy_to_user(optval
, &val
, len
))
7194 * SCTP_GET_ASSOC_STATS
7196 * This option retrieves local per endpoint statistics. It is modeled
7197 * after OpenSolaris' implementation
7199 static int sctp_getsockopt_assoc_stats(struct sock
*sk
, int len
,
7200 char __user
*optval
,
7203 struct sctp_assoc_stats sas
;
7204 struct sctp_association
*asoc
= NULL
;
7206 /* User must provide at least the assoc id */
7207 if (len
< sizeof(sctp_assoc_t
))
7210 /* Allow the struct to grow and fill in as much as possible */
7211 len
= min_t(size_t, len
, sizeof(sas
));
7213 if (copy_from_user(&sas
, optval
, len
))
7216 asoc
= sctp_id2assoc(sk
, sas
.sas_assoc_id
);
7220 sas
.sas_rtxchunks
= asoc
->stats
.rtxchunks
;
7221 sas
.sas_gapcnt
= asoc
->stats
.gapcnt
;
7222 sas
.sas_outofseqtsns
= asoc
->stats
.outofseqtsns
;
7223 sas
.sas_osacks
= asoc
->stats
.osacks
;
7224 sas
.sas_isacks
= asoc
->stats
.isacks
;
7225 sas
.sas_octrlchunks
= asoc
->stats
.octrlchunks
;
7226 sas
.sas_ictrlchunks
= asoc
->stats
.ictrlchunks
;
7227 sas
.sas_oodchunks
= asoc
->stats
.oodchunks
;
7228 sas
.sas_iodchunks
= asoc
->stats
.iodchunks
;
7229 sas
.sas_ouodchunks
= asoc
->stats
.ouodchunks
;
7230 sas
.sas_iuodchunks
= asoc
->stats
.iuodchunks
;
7231 sas
.sas_idupchunks
= asoc
->stats
.idupchunks
;
7232 sas
.sas_opackets
= asoc
->stats
.opackets
;
7233 sas
.sas_ipackets
= asoc
->stats
.ipackets
;
7235 /* New high max rto observed, will return 0 if not a single
7236 * RTO update took place. obs_rto_ipaddr will be bogus
7239 sas
.sas_maxrto
= asoc
->stats
.max_obs_rto
;
7240 memcpy(&sas
.sas_obs_rto_ipaddr
, &asoc
->stats
.obs_rto_ipaddr
,
7241 sizeof(struct sockaddr_storage
));
7243 /* Mark beginning of a new observation period */
7244 asoc
->stats
.max_obs_rto
= asoc
->rto_min
;
7246 if (put_user(len
, optlen
))
7249 pr_debug("%s: len:%d, assoc_id:%d\n", __func__
, len
, sas
.sas_assoc_id
);
7251 if (copy_to_user(optval
, &sas
, len
))
7257 static int sctp_getsockopt_recvrcvinfo(struct sock
*sk
, int len
,
7258 char __user
*optval
,
7263 if (len
< sizeof(int))
7267 if (sctp_sk(sk
)->recvrcvinfo
)
7269 if (put_user(len
, optlen
))
7271 if (copy_to_user(optval
, &val
, len
))
7277 static int sctp_getsockopt_recvnxtinfo(struct sock
*sk
, int len
,
7278 char __user
*optval
,
7283 if (len
< sizeof(int))
7287 if (sctp_sk(sk
)->recvnxtinfo
)
7289 if (put_user(len
, optlen
))
7291 if (copy_to_user(optval
, &val
, len
))
7297 static int sctp_getsockopt_pr_supported(struct sock
*sk
, int len
,
7298 char __user
*optval
,
7301 struct sctp_assoc_value params
;
7302 struct sctp_association
*asoc
;
7303 int retval
= -EFAULT
;
7305 if (len
< sizeof(params
)) {
7310 len
= sizeof(params
);
7311 if (copy_from_user(¶ms
, optval
, len
))
7314 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7315 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7316 sctp_style(sk
, UDP
)) {
7321 params
.assoc_value
= asoc
? asoc
->prsctp_enable
7322 : sctp_sk(sk
)->ep
->prsctp_enable
;
7324 if (put_user(len
, optlen
))
7327 if (copy_to_user(optval
, ¶ms
, len
))
7336 static int sctp_getsockopt_default_prinfo(struct sock
*sk
, int len
,
7337 char __user
*optval
,
7340 struct sctp_default_prinfo info
;
7341 struct sctp_association
*asoc
;
7342 int retval
= -EFAULT
;
7344 if (len
< sizeof(info
)) {
7350 if (copy_from_user(&info
, optval
, len
))
7353 asoc
= sctp_id2assoc(sk
, info
.pr_assoc_id
);
7354 if (!asoc
&& info
.pr_assoc_id
!= SCTP_FUTURE_ASSOC
&&
7355 sctp_style(sk
, UDP
)) {
7361 info
.pr_policy
= SCTP_PR_POLICY(asoc
->default_flags
);
7362 info
.pr_value
= asoc
->default_timetolive
;
7364 struct sctp_sock
*sp
= sctp_sk(sk
);
7366 info
.pr_policy
= SCTP_PR_POLICY(sp
->default_flags
);
7367 info
.pr_value
= sp
->default_timetolive
;
7370 if (put_user(len
, optlen
))
7373 if (copy_to_user(optval
, &info
, len
))
7382 static int sctp_getsockopt_pr_assocstatus(struct sock
*sk
, int len
,
7383 char __user
*optval
,
7386 struct sctp_prstatus params
;
7387 struct sctp_association
*asoc
;
7389 int retval
= -EINVAL
;
7391 if (len
< sizeof(params
))
7394 len
= sizeof(params
);
7395 if (copy_from_user(¶ms
, optval
, len
)) {
7400 policy
= params
.sprstat_policy
;
7401 if (!policy
|| (policy
& ~(SCTP_PR_SCTP_MASK
| SCTP_PR_SCTP_ALL
)) ||
7402 ((policy
& SCTP_PR_SCTP_ALL
) && (policy
& SCTP_PR_SCTP_MASK
)))
7405 asoc
= sctp_id2assoc(sk
, params
.sprstat_assoc_id
);
7409 if (policy
== SCTP_PR_SCTP_ALL
) {
7410 params
.sprstat_abandoned_unsent
= 0;
7411 params
.sprstat_abandoned_sent
= 0;
7412 for (policy
= 0; policy
<= SCTP_PR_INDEX(MAX
); policy
++) {
7413 params
.sprstat_abandoned_unsent
+=
7414 asoc
->abandoned_unsent
[policy
];
7415 params
.sprstat_abandoned_sent
+=
7416 asoc
->abandoned_sent
[policy
];
7419 params
.sprstat_abandoned_unsent
=
7420 asoc
->abandoned_unsent
[__SCTP_PR_INDEX(policy
)];
7421 params
.sprstat_abandoned_sent
=
7422 asoc
->abandoned_sent
[__SCTP_PR_INDEX(policy
)];
7425 if (put_user(len
, optlen
)) {
7430 if (copy_to_user(optval
, ¶ms
, len
)) {
7441 static int sctp_getsockopt_pr_streamstatus(struct sock
*sk
, int len
,
7442 char __user
*optval
,
7445 struct sctp_stream_out_ext
*streamoute
;
7446 struct sctp_association
*asoc
;
7447 struct sctp_prstatus params
;
7448 int retval
= -EINVAL
;
7451 if (len
< sizeof(params
))
7454 len
= sizeof(params
);
7455 if (copy_from_user(¶ms
, optval
, len
)) {
7460 policy
= params
.sprstat_policy
;
7461 if (!policy
|| (policy
& ~(SCTP_PR_SCTP_MASK
| SCTP_PR_SCTP_ALL
)) ||
7462 ((policy
& SCTP_PR_SCTP_ALL
) && (policy
& SCTP_PR_SCTP_MASK
)))
7465 asoc
= sctp_id2assoc(sk
, params
.sprstat_assoc_id
);
7466 if (!asoc
|| params
.sprstat_sid
>= asoc
->stream
.outcnt
)
7469 streamoute
= SCTP_SO(&asoc
->stream
, params
.sprstat_sid
)->ext
;
7471 /* Not allocated yet, means all stats are 0 */
7472 params
.sprstat_abandoned_unsent
= 0;
7473 params
.sprstat_abandoned_sent
= 0;
7478 if (policy
== SCTP_PR_SCTP_ALL
) {
7479 params
.sprstat_abandoned_unsent
= 0;
7480 params
.sprstat_abandoned_sent
= 0;
7481 for (policy
= 0; policy
<= SCTP_PR_INDEX(MAX
); policy
++) {
7482 params
.sprstat_abandoned_unsent
+=
7483 streamoute
->abandoned_unsent
[policy
];
7484 params
.sprstat_abandoned_sent
+=
7485 streamoute
->abandoned_sent
[policy
];
7488 params
.sprstat_abandoned_unsent
=
7489 streamoute
->abandoned_unsent
[__SCTP_PR_INDEX(policy
)];
7490 params
.sprstat_abandoned_sent
=
7491 streamoute
->abandoned_sent
[__SCTP_PR_INDEX(policy
)];
7494 if (put_user(len
, optlen
) || copy_to_user(optval
, ¶ms
, len
)) {
7505 static int sctp_getsockopt_reconfig_supported(struct sock
*sk
, int len
,
7506 char __user
*optval
,
7509 struct sctp_assoc_value params
;
7510 struct sctp_association
*asoc
;
7511 int retval
= -EFAULT
;
7513 if (len
< sizeof(params
)) {
7518 len
= sizeof(params
);
7519 if (copy_from_user(¶ms
, optval
, len
))
7522 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7523 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7524 sctp_style(sk
, UDP
)) {
7529 params
.assoc_value
= asoc
? asoc
->reconf_enable
7530 : sctp_sk(sk
)->ep
->reconf_enable
;
7532 if (put_user(len
, optlen
))
7535 if (copy_to_user(optval
, ¶ms
, len
))
7544 static int sctp_getsockopt_enable_strreset(struct sock
*sk
, int len
,
7545 char __user
*optval
,
7548 struct sctp_assoc_value params
;
7549 struct sctp_association
*asoc
;
7550 int retval
= -EFAULT
;
7552 if (len
< sizeof(params
)) {
7557 len
= sizeof(params
);
7558 if (copy_from_user(¶ms
, optval
, len
))
7561 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7562 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7563 sctp_style(sk
, UDP
)) {
7568 params
.assoc_value
= asoc
? asoc
->strreset_enable
7569 : sctp_sk(sk
)->ep
->strreset_enable
;
7571 if (put_user(len
, optlen
))
7574 if (copy_to_user(optval
, ¶ms
, len
))
7583 static int sctp_getsockopt_scheduler(struct sock
*sk
, int len
,
7584 char __user
*optval
,
7587 struct sctp_assoc_value params
;
7588 struct sctp_association
*asoc
;
7589 int retval
= -EFAULT
;
7591 if (len
< sizeof(params
)) {
7596 len
= sizeof(params
);
7597 if (copy_from_user(¶ms
, optval
, len
))
7600 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7601 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7602 sctp_style(sk
, UDP
)) {
7607 params
.assoc_value
= asoc
? sctp_sched_get_sched(asoc
)
7608 : sctp_sk(sk
)->default_ss
;
7610 if (put_user(len
, optlen
))
7613 if (copy_to_user(optval
, ¶ms
, len
))
7622 static int sctp_getsockopt_scheduler_value(struct sock
*sk
, int len
,
7623 char __user
*optval
,
7626 struct sctp_stream_value params
;
7627 struct sctp_association
*asoc
;
7628 int retval
= -EFAULT
;
7630 if (len
< sizeof(params
)) {
7635 len
= sizeof(params
);
7636 if (copy_from_user(¶ms
, optval
, len
))
7639 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7645 retval
= sctp_sched_get_value(asoc
, params
.stream_id
,
7646 ¶ms
.stream_value
);
7650 if (put_user(len
, optlen
)) {
7655 if (copy_to_user(optval
, ¶ms
, len
)) {
7664 static int sctp_getsockopt_interleaving_supported(struct sock
*sk
, int len
,
7665 char __user
*optval
,
7668 struct sctp_assoc_value params
;
7669 struct sctp_association
*asoc
;
7670 int retval
= -EFAULT
;
7672 if (len
< sizeof(params
)) {
7677 len
= sizeof(params
);
7678 if (copy_from_user(¶ms
, optval
, len
))
7681 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7682 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7683 sctp_style(sk
, UDP
)) {
7688 params
.assoc_value
= asoc
? asoc
->intl_enable
7689 : sctp_sk(sk
)->strm_interleave
;
7691 if (put_user(len
, optlen
))
7694 if (copy_to_user(optval
, ¶ms
, len
))
7703 static int sctp_getsockopt_reuse_port(struct sock
*sk
, int len
,
7704 char __user
*optval
,
7709 if (len
< sizeof(int))
7713 val
= sctp_sk(sk
)->reuse
;
7714 if (put_user(len
, optlen
))
7717 if (copy_to_user(optval
, &val
, len
))
7723 static int sctp_getsockopt_event(struct sock
*sk
, int len
, char __user
*optval
,
7726 struct sctp_association
*asoc
;
7727 struct sctp_event param
;
7730 if (len
< sizeof(param
))
7733 len
= sizeof(param
);
7734 if (copy_from_user(¶m
, optval
, len
))
7737 if (param
.se_type
< SCTP_SN_TYPE_BASE
||
7738 param
.se_type
> SCTP_SN_TYPE_MAX
)
7741 asoc
= sctp_id2assoc(sk
, param
.se_assoc_id
);
7742 if (!asoc
&& param
.se_assoc_id
!= SCTP_FUTURE_ASSOC
&&
7743 sctp_style(sk
, UDP
))
7746 subscribe
= asoc
? asoc
->subscribe
: sctp_sk(sk
)->subscribe
;
7747 param
.se_on
= sctp_ulpevent_type_enabled(subscribe
, param
.se_type
);
7749 if (put_user(len
, optlen
))
7752 if (copy_to_user(optval
, ¶m
, len
))
7758 static int sctp_getsockopt(struct sock
*sk
, int level
, int optname
,
7759 char __user
*optval
, int __user
*optlen
)
7764 pr_debug("%s: sk:%p, optname:%d\n", __func__
, sk
, optname
);
7766 /* I can hardly begin to describe how wrong this is. This is
7767 * so broken as to be worse than useless. The API draft
7768 * REALLY is NOT helpful here... I am not convinced that the
7769 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
7770 * are at all well-founded.
7772 if (level
!= SOL_SCTP
) {
7773 struct sctp_af
*af
= sctp_sk(sk
)->pf
->af
;
7775 retval
= af
->getsockopt(sk
, level
, optname
, optval
, optlen
);
7779 if (get_user(len
, optlen
))
7789 retval
= sctp_getsockopt_sctp_status(sk
, len
, optval
, optlen
);
7791 case SCTP_DISABLE_FRAGMENTS
:
7792 retval
= sctp_getsockopt_disable_fragments(sk
, len
, optval
,
7796 retval
= sctp_getsockopt_events(sk
, len
, optval
, optlen
);
7798 case SCTP_AUTOCLOSE
:
7799 retval
= sctp_getsockopt_autoclose(sk
, len
, optval
, optlen
);
7801 case SCTP_SOCKOPT_PEELOFF
:
7802 retval
= sctp_getsockopt_peeloff(sk
, len
, optval
, optlen
);
7804 case SCTP_SOCKOPT_PEELOFF_FLAGS
:
7805 retval
= sctp_getsockopt_peeloff_flags(sk
, len
, optval
, optlen
);
7807 case SCTP_PEER_ADDR_PARAMS
:
7808 retval
= sctp_getsockopt_peer_addr_params(sk
, len
, optval
,
7811 case SCTP_DELAYED_SACK
:
7812 retval
= sctp_getsockopt_delayed_ack(sk
, len
, optval
,
7816 retval
= sctp_getsockopt_initmsg(sk
, len
, optval
, optlen
);
7818 case SCTP_GET_PEER_ADDRS
:
7819 retval
= sctp_getsockopt_peer_addrs(sk
, len
, optval
,
7822 case SCTP_GET_LOCAL_ADDRS
:
7823 retval
= sctp_getsockopt_local_addrs(sk
, len
, optval
,
7826 case SCTP_SOCKOPT_CONNECTX3
:
7827 retval
= sctp_getsockopt_connectx3(sk
, len
, optval
, optlen
);
7829 case SCTP_DEFAULT_SEND_PARAM
:
7830 retval
= sctp_getsockopt_default_send_param(sk
, len
,
7833 case SCTP_DEFAULT_SNDINFO
:
7834 retval
= sctp_getsockopt_default_sndinfo(sk
, len
,
7837 case SCTP_PRIMARY_ADDR
:
7838 retval
= sctp_getsockopt_primary_addr(sk
, len
, optval
, optlen
);
7841 retval
= sctp_getsockopt_nodelay(sk
, len
, optval
, optlen
);
7844 retval
= sctp_getsockopt_rtoinfo(sk
, len
, optval
, optlen
);
7846 case SCTP_ASSOCINFO
:
7847 retval
= sctp_getsockopt_associnfo(sk
, len
, optval
, optlen
);
7849 case SCTP_I_WANT_MAPPED_V4_ADDR
:
7850 retval
= sctp_getsockopt_mappedv4(sk
, len
, optval
, optlen
);
7853 retval
= sctp_getsockopt_maxseg(sk
, len
, optval
, optlen
);
7855 case SCTP_GET_PEER_ADDR_INFO
:
7856 retval
= sctp_getsockopt_peer_addr_info(sk
, len
, optval
,
7859 case SCTP_ADAPTATION_LAYER
:
7860 retval
= sctp_getsockopt_adaptation_layer(sk
, len
, optval
,
7864 retval
= sctp_getsockopt_context(sk
, len
, optval
, optlen
);
7866 case SCTP_FRAGMENT_INTERLEAVE
:
7867 retval
= sctp_getsockopt_fragment_interleave(sk
, len
, optval
,
7870 case SCTP_PARTIAL_DELIVERY_POINT
:
7871 retval
= sctp_getsockopt_partial_delivery_point(sk
, len
, optval
,
7874 case SCTP_MAX_BURST
:
7875 retval
= sctp_getsockopt_maxburst(sk
, len
, optval
, optlen
);
7878 case SCTP_AUTH_CHUNK
:
7879 case SCTP_AUTH_DELETE_KEY
:
7880 case SCTP_AUTH_DEACTIVATE_KEY
:
7881 retval
= -EOPNOTSUPP
;
7883 case SCTP_HMAC_IDENT
:
7884 retval
= sctp_getsockopt_hmac_ident(sk
, len
, optval
, optlen
);
7886 case SCTP_AUTH_ACTIVE_KEY
:
7887 retval
= sctp_getsockopt_active_key(sk
, len
, optval
, optlen
);
7889 case SCTP_PEER_AUTH_CHUNKS
:
7890 retval
= sctp_getsockopt_peer_auth_chunks(sk
, len
, optval
,
7893 case SCTP_LOCAL_AUTH_CHUNKS
:
7894 retval
= sctp_getsockopt_local_auth_chunks(sk
, len
, optval
,
7897 case SCTP_GET_ASSOC_NUMBER
:
7898 retval
= sctp_getsockopt_assoc_number(sk
, len
, optval
, optlen
);
7900 case SCTP_GET_ASSOC_ID_LIST
:
7901 retval
= sctp_getsockopt_assoc_ids(sk
, len
, optval
, optlen
);
7903 case SCTP_AUTO_ASCONF
:
7904 retval
= sctp_getsockopt_auto_asconf(sk
, len
, optval
, optlen
);
7906 case SCTP_PEER_ADDR_THLDS
:
7907 retval
= sctp_getsockopt_paddr_thresholds(sk
, optval
, len
, optlen
);
7909 case SCTP_GET_ASSOC_STATS
:
7910 retval
= sctp_getsockopt_assoc_stats(sk
, len
, optval
, optlen
);
7912 case SCTP_RECVRCVINFO
:
7913 retval
= sctp_getsockopt_recvrcvinfo(sk
, len
, optval
, optlen
);
7915 case SCTP_RECVNXTINFO
:
7916 retval
= sctp_getsockopt_recvnxtinfo(sk
, len
, optval
, optlen
);
7918 case SCTP_PR_SUPPORTED
:
7919 retval
= sctp_getsockopt_pr_supported(sk
, len
, optval
, optlen
);
7921 case SCTP_DEFAULT_PRINFO
:
7922 retval
= sctp_getsockopt_default_prinfo(sk
, len
, optval
,
7925 case SCTP_PR_ASSOC_STATUS
:
7926 retval
= sctp_getsockopt_pr_assocstatus(sk
, len
, optval
,
7929 case SCTP_PR_STREAM_STATUS
:
7930 retval
= sctp_getsockopt_pr_streamstatus(sk
, len
, optval
,
7933 case SCTP_RECONFIG_SUPPORTED
:
7934 retval
= sctp_getsockopt_reconfig_supported(sk
, len
, optval
,
7937 case SCTP_ENABLE_STREAM_RESET
:
7938 retval
= sctp_getsockopt_enable_strreset(sk
, len
, optval
,
7941 case SCTP_STREAM_SCHEDULER
:
7942 retval
= sctp_getsockopt_scheduler(sk
, len
, optval
,
7945 case SCTP_STREAM_SCHEDULER_VALUE
:
7946 retval
= sctp_getsockopt_scheduler_value(sk
, len
, optval
,
7949 case SCTP_INTERLEAVING_SUPPORTED
:
7950 retval
= sctp_getsockopt_interleaving_supported(sk
, len
, optval
,
7953 case SCTP_REUSE_PORT
:
7954 retval
= sctp_getsockopt_reuse_port(sk
, len
, optval
, optlen
);
7957 retval
= sctp_getsockopt_event(sk
, len
, optval
, optlen
);
7960 retval
= -ENOPROTOOPT
;
7968 static int sctp_hash(struct sock
*sk
)
7974 static void sctp_unhash(struct sock
*sk
)
7979 /* Check if port is acceptable. Possibly find first available port.
7981 * The port hash table (contained in the 'global' SCTP protocol storage
7982 * returned by struct sctp_protocol *sctp_get_protocol()). The hash
7983 * table is an array of 4096 lists (sctp_bind_hashbucket). Each
7984 * list (the list number is the port number hashed out, so as you
7985 * would expect from a hash function, all the ports in a given list have
7986 * such a number that hashes out to the same list number; you were
7987 * expecting that, right?); so each list has a set of ports, with a
7988 * link to the socket (struct sock) that uses it, the port number and
7989 * a fastreuse flag (FIXME: NPI ipg).
7991 static struct sctp_bind_bucket
*sctp_bucket_create(
7992 struct sctp_bind_hashbucket
*head
, struct net
*, unsigned short snum
);
7994 static long sctp_get_port_local(struct sock
*sk
, union sctp_addr
*addr
)
7996 struct sctp_sock
*sp
= sctp_sk(sk
);
7997 bool reuse
= (sk
->sk_reuse
|| sp
->reuse
);
7998 struct sctp_bind_hashbucket
*head
; /* hash list */
7999 kuid_t uid
= sock_i_uid(sk
);
8000 struct sctp_bind_bucket
*pp
;
8001 unsigned short snum
;
8004 snum
= ntohs(addr
->v4
.sin_port
);
8006 pr_debug("%s: begins, snum:%d\n", __func__
, snum
);
8011 /* Search for an available port. */
8012 int low
, high
, remaining
, index
;
8014 struct net
*net
= sock_net(sk
);
8016 inet_get_local_port_range(net
, &low
, &high
);
8017 remaining
= (high
- low
) + 1;
8018 rover
= prandom_u32() % remaining
+ low
;
8022 if ((rover
< low
) || (rover
> high
))
8024 if (inet_is_local_reserved_port(net
, rover
))
8026 index
= sctp_phashfn(sock_net(sk
), rover
);
8027 head
= &sctp_port_hashtable
[index
];
8028 spin_lock(&head
->lock
);
8029 sctp_for_each_hentry(pp
, &head
->chain
)
8030 if ((pp
->port
== rover
) &&
8031 net_eq(sock_net(sk
), pp
->net
))
8035 spin_unlock(&head
->lock
);
8036 } while (--remaining
> 0);
8038 /* Exhausted local port range during search? */
8043 /* OK, here is the one we will use. HEAD (the port
8044 * hash table list entry) is non-NULL and we hold it's
8049 /* We are given an specific port number; we verify
8050 * that it is not being used. If it is used, we will
8051 * exahust the search in the hash list corresponding
8052 * to the port number (snum) - we detect that with the
8053 * port iterator, pp being NULL.
8055 head
= &sctp_port_hashtable
[sctp_phashfn(sock_net(sk
), snum
)];
8056 spin_lock(&head
->lock
);
8057 sctp_for_each_hentry(pp
, &head
->chain
) {
8058 if ((pp
->port
== snum
) && net_eq(pp
->net
, sock_net(sk
)))
8065 if (!hlist_empty(&pp
->owner
)) {
8066 /* We had a port hash table hit - there is an
8067 * available port (pp != NULL) and it is being
8068 * used by other socket (pp->owner not empty); that other
8069 * socket is going to be sk2.
8073 pr_debug("%s: found a possible match\n", __func__
);
8075 if ((pp
->fastreuse
&& reuse
&&
8076 sk
->sk_state
!= SCTP_SS_LISTENING
) ||
8077 (pp
->fastreuseport
&& sk
->sk_reuseport
&&
8078 uid_eq(pp
->fastuid
, uid
)))
8081 /* Run through the list of sockets bound to the port
8082 * (pp->port) [via the pointers bind_next and
8083 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
8084 * we get the endpoint they describe and run through
8085 * the endpoint's list of IP (v4 or v6) addresses,
8086 * comparing each of the addresses with the address of
8087 * the socket sk. If we find a match, then that means
8088 * that this port/socket (sk) combination are already
8091 sk_for_each_bound(sk2
, &pp
->owner
) {
8092 struct sctp_sock
*sp2
= sctp_sk(sk2
);
8093 struct sctp_endpoint
*ep2
= sp2
->ep
;
8096 (reuse
&& (sk2
->sk_reuse
|| sp2
->reuse
) &&
8097 sk2
->sk_state
!= SCTP_SS_LISTENING
) ||
8098 (sk
->sk_reuseport
&& sk2
->sk_reuseport
&&
8099 uid_eq(uid
, sock_i_uid(sk2
))))
8102 if (sctp_bind_addr_conflict(&ep2
->base
.bind_addr
,
8109 pr_debug("%s: found a match\n", __func__
);
8112 /* If there was a hash table miss, create a new port. */
8114 if (!pp
&& !(pp
= sctp_bucket_create(head
, sock_net(sk
), snum
)))
8117 /* In either case (hit or miss), make sure fastreuse is 1 only
8118 * if sk->sk_reuse is too (that is, if the caller requested
8119 * SO_REUSEADDR on this socket -sk-).
8121 if (hlist_empty(&pp
->owner
)) {
8122 if (reuse
&& sk
->sk_state
!= SCTP_SS_LISTENING
)
8127 if (sk
->sk_reuseport
) {
8128 pp
->fastreuseport
= 1;
8131 pp
->fastreuseport
= 0;
8134 if (pp
->fastreuse
&&
8135 (!reuse
|| sk
->sk_state
== SCTP_SS_LISTENING
))
8138 if (pp
->fastreuseport
&&
8139 (!sk
->sk_reuseport
|| !uid_eq(pp
->fastuid
, uid
)))
8140 pp
->fastreuseport
= 0;
8143 /* We are set, so fill up all the data in the hash table
8144 * entry, tie the socket list information with the rest of the
8145 * sockets FIXME: Blurry, NPI (ipg).
8148 if (!sp
->bind_hash
) {
8149 inet_sk(sk
)->inet_num
= snum
;
8150 sk_add_bind_node(sk
, &pp
->owner
);
8156 spin_unlock(&head
->lock
);
/* Assign a 'snum' port to the socket.  If snum == 0, an ephemeral
 * port is requested.
 */
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
	union sctp_addr addr;
	struct sctp_af *af = sctp_sk(sk)->pf->af;

	/* Set up a dummy address struct from the sk. */
	af->from_sk(&addr, sk);
	addr.v4.sin_port = htons(snum);

	/* Note: sk->sk_num gets filled in if ephemeral port request. */
	return !!sctp_get_port_local(sk, &addr);
}
/*
 *  Move a socket to LISTENING state.
 */
static int sctp_listen_start(struct sock *sk, int backlog)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct crypto_shash *tfm = NULL;
	char alg[32];

	/* Allocate HMAC for generating cookie. */
	if (!sp->hmac && sp->sctp_hmac_alg) {
		sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
		tfm = crypto_alloc_shash(alg, 0, 0);
		if (IS_ERR(tfm)) {
			net_info_ratelimited("failed to load transform for %s: %ld\n",
					     sp->sctp_hmac_alg, PTR_ERR(tfm));
			return -ENOSYS;
		}
		sctp_sk(sk)->hmac = tfm;
	}

	/*
	 * If a bind() or sctp_bindx() is not called prior to a listen()
	 * call that allows new associations to be accepted, the system
	 * picks an ephemeral port and will choose an address set equivalent
	 * to binding with a wildcard address.
	 *
	 * This is not currently spelled out in the SCTP sockets
	 * extensions draft, but follows the practice as seen in TCP
	 * sockets.
	 */
	inet_sk_set_state(sk, SCTP_SS_LISTENING);
	if (!ep->base.bind_addr.port) {
		if (sctp_autobind(sk))
			return -EAGAIN;
	} else {
		if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
			inet_sk_set_state(sk, SCTP_SS_CLOSED);
			return -EADDRINUSE;
		}
	}

	sk->sk_max_ack_backlog = backlog;
	return sctp_hash_endpoint(ep);
}
/*
 * 4.1.3 / 5.1.3 listen()
 *
 *   By default, new associations are not accepted for UDP style sockets.
 *   An application uses listen() to mark a socket as being able to
 *   accept new associations.
 *
 *   On TCP style sockets, applications use listen() to ready the SCTP
 *   endpoint for accepting inbound associations.
 *
 *   On both types of endpoints a backlog of '0' disables listening.
 *
 *  Move a socket to LISTENING state.
 */
int sctp_inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	int err = -EINVAL;

	if (unlikely(backlog < 0))
		return err;

	lock_sock(sk);

	/* Peeled-off sockets are not allowed to listen(). */
	if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
		goto out;

	if (sock->state != SS_UNCONNECTED)
		goto out;

	if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
		goto out;

	/* If backlog is zero, disable listening. */
	if (!backlog) {
		if (sctp_sstate(sk, CLOSED))
			goto out;

		err = 0;
		sctp_unhash_endpoint(ep);
		sk->sk_state = SCTP_SS_CLOSED;
		if (sk->sk_reuse || sctp_sk(sk)->reuse)
			sctp_sk(sk)->bind_hash->fastreuse = 1;
		goto out;
	}

	/* If we are already listening, just update the backlog */
	if (sctp_sstate(sk, LISTENING))
		sk->sk_max_ack_backlog = backlog;
	else {
		err = sctp_listen_start(sk, backlog);
		if (err)
			goto out;
	}

	err = 0;
out:
	release_sock(sk);
	return err;
}
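/* Illustrative userspace sketch (not part of this kernel file): enabling new
 * associations on a one-to-many socket.  The port number and backlog are
 * example values only.
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in sin = { .sin_family = AF_INET,
 *				   .sin_port = htons(5000),
 *				   .sin_addr.s_addr = htonl(INADDR_ANY) };
 *
 *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *	listen(fd, 5);
 *
 * A backlog greater than zero accepts new associations; calling listen()
 * again with a backlog of zero disables listening, as described above.
 */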
8291 * This function is done by modeling the current datagram_poll() and the
8292 * tcp_poll(). Note that, based on these implementations, we don't
8293 * lock the socket in this function, even though it seems that,
8294 * ideally, locking or some other mechanisms can be used to ensure
8295 * the integrity of the counters (sndbuf and wmem_alloc) used
8296 * in this place. We assume that we don't need locks either until proven
8299 * Another thing to note is that we include the Async I/O support
8300 * here, again, by modeling the current TCP/UDP code. We don't have
8301 * a good way to test with it yet.
8303 __poll_t
sctp_poll(struct file
*file
, struct socket
*sock
, poll_table
*wait
)
8305 struct sock
*sk
= sock
->sk
;
8306 struct sctp_sock
*sp
= sctp_sk(sk
);
8309 poll_wait(file
, sk_sleep(sk
), wait
);
8311 sock_rps_record_flow(sk
);
8313 /* A TCP-style listening socket becomes readable when the accept queue
8316 if (sctp_style(sk
, TCP
) && sctp_sstate(sk
, LISTENING
))
8317 return (!list_empty(&sp
->ep
->asocs
)) ?
8318 (EPOLLIN
| EPOLLRDNORM
) : 0;
8322 /* Is there any exceptional events? */
8323 if (sk
->sk_err
|| !skb_queue_empty(&sk
->sk_error_queue
))
8325 (sock_flag(sk
, SOCK_SELECT_ERR_QUEUE
) ? EPOLLPRI
: 0);
8326 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
8327 mask
|= EPOLLRDHUP
| EPOLLIN
| EPOLLRDNORM
;
8328 if (sk
->sk_shutdown
== SHUTDOWN_MASK
)
8331 /* Is it readable? Reconsider this code with TCP-style support. */
8332 if (!skb_queue_empty(&sk
->sk_receive_queue
))
8333 mask
|= EPOLLIN
| EPOLLRDNORM
;
8335 /* The association is either gone or not ready. */
8336 if (!sctp_style(sk
, UDP
) && sctp_sstate(sk
, CLOSED
))
8339 /* Is it writable? */
8340 if (sctp_writeable(sk
)) {
8341 mask
|= EPOLLOUT
| EPOLLWRNORM
;
8343 sk_set_bit(SOCKWQ_ASYNC_NOSPACE
, sk
);
8345 * Since the socket is not locked, the buffer
8346 * might be made available after the writeable check and
8347 * before the bit is set. This could cause a lost I/O
8348 * signal. tcp_poll() has a race breaker for this race
8349 * condition. Based on their implementation, we put
8350 * in the following code to cover it as well.
8352 if (sctp_writeable(sk
))
8353 mask
|= EPOLLOUT
| EPOLLWRNORM
;
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

static struct sctp_bind_bucket *sctp_bucket_create(
	struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
{
	struct sctp_bind_bucket *pp;

	pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
	if (pp) {
		SCTP_DBG_OBJCNT_INC(bind_bucket);
		pp->port = snum;
		pp->fastreuse = 0;
		INIT_HLIST_HEAD(&pp->owner);
		pp->net = net;
		hlist_add_head(&pp->node, &head->chain);
	}
	return pp;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
	if (pp && hlist_empty(&pp->owner)) {
		__hlist_del(&pp->node);
		kmem_cache_free(sctp_bucket_cachep, pp);
		SCTP_DBG_OBJCNT_DEC(bind_bucket);
	}
}
/* Release this socket's reference to a local port.  */
static inline void __sctp_put_port(struct sock *sk)
{
	struct sctp_bind_hashbucket *head =
		&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
						  inet_sk(sk)->inet_num)];
	struct sctp_bind_bucket *pp;

	spin_lock(&head->lock);
	pp = sctp_sk(sk)->bind_hash;
	__sk_del_bind_node(sk);
	sctp_sk(sk)->bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	sctp_bucket_destroy(pp);
	spin_unlock(&head->lock);
}

void sctp_put_port(struct sock *sk)
{
	local_bh_disable();
	__sctp_put_port(sk);
	local_bh_enable();
}
/*
 * The system picks an ephemeral port and choose an address set equivalent
 * to binding with a wildcard address.
 * One of those addresses will be the primary address for the association.
 * This automatically enables the multihoming capability of SCTP.
 */
static int sctp_autobind(struct sock *sk)
{
	union sctp_addr autoaddr;
	struct sctp_af *af;
	__be16 port;

	/* Initialize a local sockaddr structure to INADDR_ANY. */
	af = sctp_sk(sk)->pf->af;

	port = htons(inet_sk(sk)->inet_num);
	af->inaddr_any(&autoaddr, port);

	return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
}
8434 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
8437 * 4.2 The cmsghdr Structure *
8439 * When ancillary data is sent or received, any number of ancillary data
8440 * objects can be specified by the msg_control and msg_controllen members of
8441 * the msghdr structure, because each object is preceded by
8442 * a cmsghdr structure defining the object's length (the cmsg_len member).
8443 * Historically Berkeley-derived implementations have passed only one object
8444 * at a time, but this API allows multiple objects to be
8445 * passed in a single call to sendmsg() or recvmsg(). The following example
8446 * shows two ancillary data objects in a control buffer.
8448 * |<--------------------------- msg_controllen -------------------------->|
8451 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
8453 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
8456 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| |
8458 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| |
8461 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
8462 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX|
8464 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX|
8466 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
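 *
 * Illustrative userspace sketch (not part of this kernel file): sending one
 * message with an SCTP_SNDINFO ancillary data object laid out as in the
 * diagram above.  "fd", "dst", "payload" and "payload_len" are assumptions
 * for the example.
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))] = { 0 };
 *	struct iovec iov = { .iov_base = payload, .iov_len = payload_len };
 *	struct msghdr msg = { .msg_name = &dst, .msg_namelen = sizeof(dst),
 *			      .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	struct sctp_sndinfo snd = { .snd_sid = 1, .snd_flags = SCTP_UNORDERED };
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(snd));
 *	memcpy(CMSG_DATA(cmsg), &snd, sizeof(snd));
 *	sendmsg(fd, &msg, 0);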
8473 static int sctp_msghdr_parse(const struct msghdr
*msg
, struct sctp_cmsgs
*cmsgs
)
8475 struct msghdr
*my_msg
= (struct msghdr
*)msg
;
8476 struct cmsghdr
*cmsg
;
8478 for_each_cmsghdr(cmsg
, my_msg
) {
8479 if (!CMSG_OK(my_msg
, cmsg
))
8482 /* Should we parse this header or ignore? */
8483 if (cmsg
->cmsg_level
!= IPPROTO_SCTP
)
8486 /* Strictly check lengths following example in SCM code. */
8487 switch (cmsg
->cmsg_type
) {
8489 /* SCTP Socket API Extension
8490 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
8492 * This cmsghdr structure provides information for
8493 * initializing new SCTP associations with sendmsg().
8494 * The SCTP_INITMSG socket option uses this same data
8495 * structure. This structure is not used for
8498 * cmsg_level cmsg_type cmsg_data[]
8499 * ------------ ------------ ----------------------
8500 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
8502 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_initmsg
)))
8505 cmsgs
->init
= CMSG_DATA(cmsg
);
8509 /* SCTP Socket API Extension
8510 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
8512 * This cmsghdr structure specifies SCTP options for
8513 * sendmsg() and describes SCTP header information
8514 * about a received message through recvmsg().
8516 * cmsg_level cmsg_type cmsg_data[]
8517 * ------------ ------------ ----------------------
8518 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
8520 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_sndrcvinfo
)))
8523 cmsgs
->srinfo
= CMSG_DATA(cmsg
);
8525 if (cmsgs
->srinfo
->sinfo_flags
&
8526 ~(SCTP_UNORDERED
| SCTP_ADDR_OVER
|
8527 SCTP_SACK_IMMEDIATELY
| SCTP_SENDALL
|
8528 SCTP_PR_SCTP_MASK
| SCTP_ABORT
| SCTP_EOF
))
8533 /* SCTP Socket API Extension
8534 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
8536 * This cmsghdr structure specifies SCTP options for
8537 * sendmsg(). This structure and SCTP_RCVINFO replaces
8538 * SCTP_SNDRCV which has been deprecated.
8540 * cmsg_level cmsg_type cmsg_data[]
8541 * ------------ ------------ ---------------------
8542 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
8544 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_sndinfo
)))
8547 cmsgs
->sinfo
= CMSG_DATA(cmsg
);
8549 if (cmsgs
->sinfo
->snd_flags
&
8550 ~(SCTP_UNORDERED
| SCTP_ADDR_OVER
|
8551 SCTP_SACK_IMMEDIATELY
| SCTP_SENDALL
|
8552 SCTP_PR_SCTP_MASK
| SCTP_ABORT
| SCTP_EOF
))
8556 /* SCTP Socket API Extension
8557 * 5.3.7 SCTP PR-SCTP Information Structure (SCTP_PRINFO)
8559 * This cmsghdr structure specifies SCTP options for sendmsg().
8561 * cmsg_level cmsg_type cmsg_data[]
8562 * ------------ ------------ ---------------------
8563 * IPPROTO_SCTP SCTP_PRINFO struct sctp_prinfo
8565 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_prinfo
)))
8568 cmsgs
->prinfo
= CMSG_DATA(cmsg
);
8569 if (cmsgs
->prinfo
->pr_policy
& ~SCTP_PR_SCTP_MASK
)
8572 if (cmsgs
->prinfo
->pr_policy
== SCTP_PR_SCTP_NONE
)
8573 cmsgs
->prinfo
->pr_value
= 0;
8576 /* SCTP Socket API Extension
8577 * 5.3.8 SCTP AUTH Information Structure (SCTP_AUTHINFO)
8579 * This cmsghdr structure specifies SCTP options for sendmsg().
8581 * cmsg_level cmsg_type cmsg_data[]
8582 * ------------ ------------ ---------------------
8583 * IPPROTO_SCTP SCTP_AUTHINFO struct sctp_authinfo
8585 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_authinfo
)))
8588 cmsgs
->authinfo
= CMSG_DATA(cmsg
);
8590 case SCTP_DSTADDRV4
:
8591 case SCTP_DSTADDRV6
:
8592 /* SCTP Socket API Extension
8593 * 5.3.9/10 SCTP Destination IPv4/6 Address Structure (SCTP_DSTADDRV4/6)
8595 * This cmsghdr structure specifies SCTP options for sendmsg().
8597 * cmsg_level cmsg_type cmsg_data[]
8598 * ------------ ------------ ---------------------
8599 * IPPROTO_SCTP SCTP_DSTADDRV4 struct in_addr
8600 * ------------ ------------ ---------------------
8601 * IPPROTO_SCTP SCTP_DSTADDRV6 struct in6_addr
8603 cmsgs
->addrs_msg
= my_msg
;
8614 * Wait for a packet..
8615 * Note: This function is the same function as in core/datagram.c
8616 * with a few modifications to make lksctp work.
8618 static int sctp_wait_for_packet(struct sock
*sk
, int *err
, long *timeo_p
)
8623 prepare_to_wait_exclusive(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
8625 /* Socket errors? */
8626 error
= sock_error(sk
);
8630 if (!skb_queue_empty(&sk
->sk_receive_queue
))
8633 /* Socket shut down? */
8634 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
8637 /* Sequenced packets can come disconnected. If so we report the
8642 /* Is there a good reason to think that we may receive some data? */
8643 if (list_empty(&sctp_sk(sk
)->ep
->asocs
) && !sctp_sstate(sk
, LISTENING
))
8646 /* Handle signals. */
8647 if (signal_pending(current
))
8650 /* Let another process have a go. Since we are going to sleep
8651 * anyway. Note: This may cause odd behaviors if the message
8652 * does not fit in the user's buffer, but this seems to be the
8653 * only way to honor MSG_DONTWAIT realistically.
8656 *timeo_p
= schedule_timeout(*timeo_p
);
8660 finish_wait(sk_sleep(sk
), &wait
);
8664 error
= sock_intr_errno(*timeo_p
);
8667 finish_wait(sk_sleep(sk
), &wait
);
8672 /* Receive a datagram.
8673 * Note: This is pretty much the same routine as in core/datagram.c
8674 * with a few changes to make lksctp work.
8676 struct sk_buff
*sctp_skb_recv_datagram(struct sock
*sk
, int flags
,
8677 int noblock
, int *err
)
8680 struct sk_buff
*skb
;
8683 timeo
= sock_rcvtimeo(sk
, noblock
);
8685 pr_debug("%s: timeo:%ld, max:%ld\n", __func__
, timeo
,
8686 MAX_SCHEDULE_TIMEOUT
);
8689 /* Again only user level code calls this function,
8690 * so nothing interrupt level
8691 * will suddenly eat the receive_queue.
8693 * Look at current nfs client by the way...
8694 * However, this function was correct in any case. 8)
8696 if (flags
& MSG_PEEK
) {
8697 skb
= skb_peek(&sk
->sk_receive_queue
);
8699 refcount_inc(&skb
->users
);
8701 skb
= __skb_dequeue(&sk
->sk_receive_queue
);
8707 /* Caller is allowed not to check sk->sk_err before calling. */
8708 error
= sock_error(sk
);
8712 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
8715 if (sk_can_busy_loop(sk
)) {
8716 sk_busy_loop(sk
, noblock
);
8718 if (!skb_queue_empty(&sk
->sk_receive_queue
))
8722 /* User doesn't want to wait. */
8726 } while (sctp_wait_for_packet(sk
, err
, &timeo
) == 0);
8735 /* If sndbuf has changed, wake up per association sndbuf waiters. */
8736 static void __sctp_write_space(struct sctp_association
*asoc
)
8738 struct sock
*sk
= asoc
->base
.sk
;
8740 if (sctp_wspace(asoc
) <= 0)
8743 if (waitqueue_active(&asoc
->wait
))
8744 wake_up_interruptible(&asoc
->wait
);
8746 if (sctp_writeable(sk
)) {
8747 struct socket_wq
*wq
;
8750 wq
= rcu_dereference(sk
->sk_wq
);
8752 if (waitqueue_active(&wq
->wait
))
8753 wake_up_interruptible(&wq
->wait
);
8755 /* Note that we try to include the Async I/O support
8756 * here by modeling from the current TCP/UDP code.
8757 * We have not tested with it yet.
8759 if (!(sk
->sk_shutdown
& SEND_SHUTDOWN
))
8760 sock_wake_async(wq
, SOCK_WAKE_SPACE
, POLL_OUT
);
8766 static void sctp_wake_up_waiters(struct sock
*sk
,
8767 struct sctp_association
*asoc
)
8769 struct sctp_association
*tmp
= asoc
;
8771 /* We do accounting for the sndbuf space per association,
8772 * so we only need to wake our own association.
8774 if (asoc
->ep
->sndbuf_policy
)
8775 return __sctp_write_space(asoc
);
8777 /* If association goes down and is just flushing its
8778 * outq, then just normally notify others.
8780 if (asoc
->base
.dead
)
8781 return sctp_write_space(sk
);
8783 /* Accounting for the sndbuf space is per socket, so we
8784 * need to wake up others, try to be fair and in case of
8785 * other associations, let them have a go first instead
8786 * of just doing a sctp_write_space() call.
8788 * Note that we reach sctp_wake_up_waiters() only when
8789 * associations free up queued chunks, thus we are under
8790 * lock and the list of associations on a socket is
8791 * guaranteed not to change.
8793 for (tmp
= list_next_entry(tmp
, asocs
); 1;
8794 tmp
= list_next_entry(tmp
, asocs
)) {
8795 /* Manually skip the head element. */
8796 if (&tmp
->asocs
== &((sctp_sk(sk
))->ep
->asocs
))
8798 /* Wake up association. */
8799 __sctp_write_space(tmp
);
8800 /* We've reached the end. */
8806 /* Do accounting for the sndbuf space.
8807 * Decrement the used sndbuf space of the corresponding association by the
8808 * data size which was just transmitted(freed).
8810 static void sctp_wfree(struct sk_buff
*skb
)
8812 struct sctp_chunk
*chunk
= skb_shinfo(skb
)->destructor_arg
;
8813 struct sctp_association
*asoc
= chunk
->asoc
;
8814 struct sock
*sk
= asoc
->base
.sk
;
8816 sk_mem_uncharge(sk
, skb
->truesize
);
8817 sk
->sk_wmem_queued
-= skb
->truesize
+ sizeof(struct sctp_chunk
);
8818 asoc
->sndbuf_used
-= skb
->truesize
+ sizeof(struct sctp_chunk
);
8819 WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk
),
8820 &sk
->sk_wmem_alloc
));
8823 struct sctp_shared_key
*shkey
= chunk
->shkey
;
8825 /* refcnt == 2 and !list_empty mean after this release, it's
8826 * not being used anywhere, and it's time to notify userland
8827 * that this shkey can be freed if it's been deactivated.
8829 if (shkey
->deactivated
&& !list_empty(&shkey
->key_list
) &&
8830 refcount_read(&shkey
->refcnt
) == 2) {
8831 struct sctp_ulpevent
*ev
;
8833 ev
= sctp_ulpevent_make_authkey(asoc
, shkey
->key_id
,
8837 asoc
->stream
.si
->enqueue_event(&asoc
->ulpq
, ev
);
8839 sctp_auth_shkey_release(chunk
->shkey
);
8843 sctp_wake_up_waiters(sk
, asoc
);
8845 sctp_association_put(asoc
);
/* Do accounting for the receive space on the socket.
 * Accounting for the association is done in ulpevent.c
 * We set this as a destructor for the cloned data skbs so that
 * accounting is done at the correct time.
 */
void sctp_sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct sctp_ulpevent *event = sctp_skb2event(skb);

	atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);

	/*
	 * Mimic the behavior of sock_rfree
	 */
	sk_mem_uncharge(sk, event->rmem_len);
}
/* Helper function to wait for space in the sndbuf.  */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                                size_t msg_len)
{
        struct sock *sk = asoc->base.sk;
        long current_timeo = *timeo_p;
        DEFINE_WAIT(wait);
        int err = 0;

        pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
                 *timeo_p, msg_len);

        /* Increment the association's refcnt.  */
        sctp_association_hold(asoc);

        /* Wait on the association specific sndbuf space. */
        for (;;) {
                prepare_to_wait_exclusive(&asoc->wait, &wait,
                                          TASK_INTERRUPTIBLE);
                if (asoc->base.dead)
                        goto do_dead;
                if (!*timeo_p)
                        goto do_nonblock;
                if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
                        goto do_error;
                if (signal_pending(current))
                        goto do_interrupted;
                if ((int)msg_len <= sctp_wspace(asoc))
                        break;

                /* Let another process have a go.  Since we are going
                 * to sleep anyway.
                 */
                release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
                lock_sock(sk);
                if (sk != asoc->base.sk)
                        goto do_error;

                *timeo_p = current_timeo;
        }

out:
        finish_wait(&asoc->wait, &wait);

        /* Release the association's refcnt.  */
        sctp_association_put(asoc);

        return err;

do_dead:
        err = -ESRCH;
        goto out;

do_error:
        err = -EPIPE;
        goto out;

do_interrupted:
        err = sock_intr_errno(*timeo_p);
        goto out;

do_nonblock:
        err = -EAGAIN;
        goto out;
}

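/*
 * Illustrative userspace sketch (not part of the kernel build): the wait
 * above is only entered for blocking sends.  A non-blocking sender gets
 * -EAGAIN instead and is expected to retry once the socket becomes writable
 * again, e.g.:
 *
 *      #include <errno.h>
 *      #include <poll.h>
 *      #include <sys/socket.h>
 *
 *      static ssize_t send_retry(int fd, const void *buf, size_t len)
 *      {
 *              for (;;) {
 *                      ssize_t n = send(fd, buf, len, 0);
 *
 *                      if (n >= 0 || errno != EAGAIN)
 *                              return n;
 *
 *                      // Wait until sctp_wfree()/__sctp_write_space()
 *                      // free enough sndbuf space for this message.
 *                      struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *                      poll(&pfd, 1, -1);
 *              }
 *      }
 */
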
void sctp_data_ready(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
                                                EPOLLRDNORM | EPOLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

/* If socket sndbuf has changed, wake up all per association waiters.  */
void sctp_write_space(struct sock *sk)
{
        struct sctp_association *asoc;

        /* Wake up the tasks in each wait queue.  */
        list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
                __sctp_write_space(asoc);
        }
}

/* Is there any sndbuf space available on the socket?
 *
 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
 * associations on the same socket.  For a UDP-style socket with
 * multiple associations, it is possible for it to be "unwriteable"
 * prematurely.  I assume that this is acceptable because
 * a premature "unwriteable" is better than an accidental "writeable" which
 * would cause an unwanted block under certain circumstances.  For the 1-1
 * UDP-style sockets or TCP-style sockets, this code should work.
 */
static bool sctp_writeable(struct sock *sk)
{
        return sk->sk_sndbuf > sk->sk_wmem_queued;
}

/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
 * returns immediately with EINPROGRESS.
 */
static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
{
        struct sock *sk = asoc->base.sk;
        int err = 0;
        long current_timeo = *timeo_p;
        DEFINE_WAIT(wait);

        pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);

        /* Increment the association's refcnt.  */
        sctp_association_hold(asoc);

        for (;;) {
                prepare_to_wait_exclusive(&asoc->wait, &wait,
                                          TASK_INTERRUPTIBLE);
                if (!*timeo_p)
                        goto do_nonblock;
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        break;
                if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
                    asoc->base.dead)
                        goto do_error;
                if (signal_pending(current))
                        goto do_interrupted;

                if (sctp_state(asoc, ESTABLISHED))
                        break;

                /* Let another process have a go.  Since we are going
                 * to sleep anyway.
                 */
                release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
                lock_sock(sk);

                *timeo_p = current_timeo;
        }

out:
        finish_wait(&asoc->wait, &wait);

        /* Release the association's refcnt.  */
        sctp_association_put(asoc);

        return err;

do_error:
        if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
                err = -ETIMEDOUT;
        else
                err = -ECONNREFUSED;
        goto out;

do_interrupted:
        err = sock_intr_errno(*timeo_p);
        goto out;

do_nonblock:
        err = -EINPROGRESS;
        goto out;
}

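/*
 * Illustrative userspace sketch (not part of the kernel build): with a
 * non-blocking socket the wait above is skipped and connect() fails with
 * EINPROGRESS.  Userspace then typically polls for writability and checks
 * SO_ERROR once the association attempt has finished:
 *
 *      #include <poll.h>
 *      #include <sys/socket.h>
 *
 *      static int finish_connect(int fd, int timeout_ms)
 *      {
 *              struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *              int err = 0;
 *              socklen_t len = sizeof(err);
 *
 *              if (poll(&pfd, 1, timeout_ms) <= 0)
 *                      return -1;      // timed out or poll failed
 *              if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
 *                      return -1;
 *              return err;             // 0 on success, e.g. ECONNREFUSED on failure
 *      }
 */
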
static int sctp_wait_for_accept(struct sock *sk, long timeo)
{
        struct sctp_endpoint *ep;
        int err = 0;
        DEFINE_WAIT(wait);

        ep = sctp_sk(sk)->ep;

        for (;;) {
                prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                          TASK_INTERRUPTIBLE);

                if (list_empty(&ep->asocs)) {
                        release_sock(sk);
                        timeo = schedule_timeout(timeo);
                        lock_sock(sk);
                }

                err = -EINVAL;
                if (!sctp_sstate(sk, LISTENING))
                        break;

                err = 0;
                if (!list_empty(&ep->asocs))
                        break;

                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;

                err = -EAGAIN;
                if (!timeo)
                        break;
        }

        finish_wait(sk_sleep(sk), &wait);

        return err;
}

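/*
 * Illustrative userspace sketch (not part of the kernel build): this wait
 * backs a blocking accept() on a TCP-style (one-to-one) SCTP socket.  A
 * plain listener loop is enough to exercise it:
 *
 *      #include <netinet/in.h>
 *      #include <sys/socket.h>
 *
 *      // fd was created with socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP),
 *      // bound, and passed to listen().  accept() blocks in
 *      // sctp_wait_for_accept() until an association is established, or
 *      // fails with EAGAIN if an SO_RCVTIMEO timeout expires first.
 *      static int accept_one(int fd)
 *      {
 *              return accept(fd, NULL, NULL);
 *      }
 */
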
static void sctp_wait_for_close(struct sock *sk, long timeout)
{
        DEFINE_WAIT(wait);

        do {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                if (list_empty(&sctp_sk(sk)->ep->asocs))
                        break;
                release_sock(sk);
                timeout = schedule_timeout(timeout);
                lock_sock(sk);
        } while (!signal_pending(current) && timeout);

        finish_wait(sk_sleep(sk), &wait);
}

static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
        struct sk_buff *frag;

        /* Don't forget the fragments. */
        skb_walk_frags(skb, frag)
                sctp_skb_set_owner_r_frag(frag, sk);

        sctp_skb_set_owner_r(skb, sk);
}

void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc)
{
        struct inet_sock *inet = inet_sk(sk);
        struct inet_sock *newinet;
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_endpoint *ep = sp->ep;

        newsk->sk_type = sk->sk_type;
        newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
        newsk->sk_flags = sk->sk_flags;
        newsk->sk_tsflags = sk->sk_tsflags;
        newsk->sk_no_check_tx = sk->sk_no_check_tx;
        newsk->sk_no_check_rx = sk->sk_no_check_rx;
        newsk->sk_reuse = sk->sk_reuse;
        sctp_sk(newsk)->reuse = sp->reuse;

        newsk->sk_shutdown = sk->sk_shutdown;
        newsk->sk_destruct = sctp_destruct_sock;
        newsk->sk_family = sk->sk_family;
        newsk->sk_protocol = IPPROTO_SCTP;
        newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
        newsk->sk_sndbuf = sk->sk_sndbuf;
        newsk->sk_rcvbuf = sk->sk_rcvbuf;
        newsk->sk_lingertime = sk->sk_lingertime;
        newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
        newsk->sk_sndtimeo = sk->sk_sndtimeo;
        newsk->sk_rxhash = sk->sk_rxhash;

        newinet = inet_sk(newsk);

        /* Initialize sk's sport, dport, rcv_saddr and daddr for
         * getsockname() and getpeername()
         */
        newinet->inet_sport = inet->inet_sport;
        newinet->inet_saddr = inet->inet_saddr;
        newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
        newinet->inet_dport = htons(asoc->peer.port);
        newinet->pmtudisc = inet->pmtudisc;
        newinet->inet_id = asoc->next_tsn ^ jiffies;

        newinet->uc_ttl = inet->uc_ttl;
        newinet->mc_loop = 1;
        newinet->mc_ttl = 1;
        newinet->mc_index = 0;
        newinet->mc_list = NULL;

        if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
                net_enable_timestamp();

        /* Set newsk security attributes from original sk and connection
         * security attribute from ep.
         */
        security_sctp_sk_clone(ep, sk, newsk);
}

static inline void sctp_copy_descendant(struct sock *sk_to,
                                        const struct sock *sk_from)
{
        int ancestor_size = sizeof(struct inet_sock) +
                            sizeof(struct sctp_sock) -
                            offsetof(struct sctp_sock, auto_asconf_list);

        if (sk_from->sk_family == PF_INET6)
                ancestor_size += sizeof(struct ipv6_pinfo);

        __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
}

/* Populate the fields of the newsk from the oldsk and migrate the assoc
 * and its messages to the newsk.
 */
static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
                             struct sctp_association *assoc,
                             enum sctp_socket_type type)
{
        struct sctp_sock *oldsp = sctp_sk(oldsk);
        struct sctp_sock *newsp = sctp_sk(newsk);
        struct sctp_bind_bucket *pp; /* hash list port iterator */
        struct sctp_endpoint *newep = newsp->ep;
        struct sk_buff *skb, *tmp;
        struct sctp_ulpevent *event;
        struct sctp_bind_hashbucket *head;
        int err;

        /* Migrate socket buffer sizes and all the socket level options to the
         * new socket.
         */
        newsk->sk_sndbuf = oldsk->sk_sndbuf;
        newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
        /* Brute force copy old sctp opt. */
        sctp_copy_descendant(newsk, oldsk);

        /* Restore the ep value that was overwritten with the above structure
         * copy.
         */
        newsp->ep = newep;
        newsp->hmac = NULL;

        /* Hook this new socket in to the bind_hash list. */
        head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
                                                 inet_sk(oldsk)->inet_num)];
        spin_lock_bh(&head->lock);
        pp = sctp_sk(oldsk)->bind_hash;
        sk_add_bind_node(newsk, &pp->owner);
        sctp_sk(newsk)->bind_hash = pp;
        inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
        spin_unlock_bh(&head->lock);

        /* Copy the bind_addr list from the original endpoint to the new
         * endpoint so that we can handle restarts properly
         */
        err = sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
                                 &oldsp->ep->base.bind_addr, GFP_KERNEL);
        if (err)
                return err;

        /* New ep's auth_hmacs should be set if old ep's is set, in case
         * that net->sctp.auth_enable has been changed to 0 by users and
         * new ep's auth_hmacs couldn't be set in sctp_endpoint_init().
         */
        if (oldsp->ep->auth_hmacs) {
                err = sctp_auth_init_hmacs(newsp->ep, GFP_KERNEL);
                if (err)
                        return err;
        }

        /* Move any messages in the old socket's receive queue that are for the
         * peeled off association to the new socket's receive queue.
         */
        sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
                event = sctp_skb2event(skb);
                if (event->asoc == assoc) {
                        __skb_unlink(skb, &oldsk->sk_receive_queue);
                        __skb_queue_tail(&newsk->sk_receive_queue, skb);
                        sctp_skb_set_owner_r_frag(skb, newsk);
                }
        }

        /* Clean up any messages pending delivery due to partial
         * delivery.  Three cases:
         * 1) No partial delivery; no work.
         * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
         * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
         */
        skb_queue_head_init(&newsp->pd_lobby);
        atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);

        if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
                struct sk_buff_head *queue;

                /* Decide which queue to move pd_lobby skbs to. */
                if (assoc->ulpq.pd_mode)
                        queue = &newsp->pd_lobby;
                else
                        queue = &newsk->sk_receive_queue;

                /* Walk through the pd_lobby, looking for skbs that
                 * need to be moved to the new socket.
                 */
                sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
                        event = sctp_skb2event(skb);
                        if (event->asoc == assoc) {
                                __skb_unlink(skb, &oldsp->pd_lobby);
                                __skb_queue_tail(queue, skb);
                                sctp_skb_set_owner_r_frag(skb, newsk);
                        }
                }

                /* Clear up any skbs waiting for the partial
                 * delivery to finish.
                 */
                if (assoc->ulpq.pd_mode)
                        sctp_clear_pd(oldsk, NULL);
        }

        sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);

        /* Set the type of socket to indicate that it is peeled off from the
         * original UDP-style socket or created with the accept() call on a
         * TCP-style socket.
         */
        newsp->type = type;

        /* Mark the new socket "in-use" by the user so that any packets
         * that may arrive on the association after we've moved it are
         * queued to the backlog.  This prevents a potential race between
         * backlog processing on the old socket and new-packet processing
         * on the new socket.
         *
         * The caller has just allocated newsk so we can guarantee that other
         * paths won't try to lock it and then oldsk.
         */
        lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
        sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
        sctp_assoc_migrate(assoc, newsk);
        sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);

        /* If the association on the newsk is already closed before accept()
         * is called, set RCV_SHUTDOWN flag.
         */
        if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
                inet_sk_set_state(newsk, SCTP_SS_CLOSED);
                newsk->sk_shutdown |= RCV_SHUTDOWN;
        } else {
                inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
        }

        release_sock(newsk);

        return 0;
}

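/*
 * Illustrative userspace sketch (not part of the kernel build):
 * sctp_sock_migrate() runs both for accept() on a TCP-style socket and for
 * peeling one association off a UDP-style (one-to-many) socket.  With the
 * lksctp-tools helper the peel-off side looks like this:
 *
 *      #include <netinet/sctp.h>
 *
 *      // assoc_id identifies an association on the one-to-many socket fd,
 *      // e.g. taken from an SCTP_ASSOC_CHANGE notification.  The returned
 *      // descriptor is a one-to-one socket that now owns the association,
 *      // its queued messages having been moved by sctp_sock_migrate().
 *      static int peel_off(int fd, sctp_assoc_t assoc_id)
 *      {
 *              return sctp_peeloff(fd, assoc_id);
 *      }
 */
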
/* This proto struct describes the ULP interface for SCTP.  */
struct proto sctp_prot = {
        .owner                  = THIS_MODULE,
        .close                  = sctp_close,
        .disconnect             = sctp_disconnect,
        .accept                 = sctp_accept,
        .ioctl                  = sctp_ioctl,
        .init                   = sctp_init_sock,
        .destroy                = sctp_destroy_sock,
        .shutdown               = sctp_shutdown,
        .setsockopt             = sctp_setsockopt,
        .getsockopt             = sctp_getsockopt,
        .sendmsg                = sctp_sendmsg,
        .recvmsg                = sctp_recvmsg,
        .backlog_rcv            = sctp_backlog_rcv,
        .unhash                 = sctp_unhash,
        .get_port               = sctp_get_port,
        .obj_size               = sizeof(struct sctp_sock),
        .useroffset             = offsetof(struct sctp_sock, subscribe),
        .usersize               = offsetof(struct sctp_sock, initmsg) -
                                  offsetof(struct sctp_sock, subscribe) +
                                  sizeof_field(struct sctp_sock, initmsg),
        .sysctl_mem             = sysctl_sctp_mem,
        .sysctl_rmem            = sysctl_sctp_rmem,
        .sysctl_wmem            = sysctl_sctp_wmem,
        .memory_pressure        = &sctp_memory_pressure,
        .enter_memory_pressure  = sctp_enter_memory_pressure,
        .memory_allocated       = &sctp_memory_allocated,
        .sockets_allocated      = &sctp_sockets_allocated,
};

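/*
 * Illustrative userspace sketch (not part of the kernel build): this proto
 * (and its IPv6 counterpart below) is selected whenever an application asks
 * for IPPROTO_SCTP, so all of the handlers above are reached through the
 * ordinary BSD socket calls:
 *
 *      #include <netinet/in.h>
 *      #include <sys/socket.h>
 *
 *      // One-to-many (UDP-style) socket: sendmsg()/recvmsg() end up in
 *      // sctp_sendmsg()/sctp_recvmsg() above.
 *      int fd_1toN = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *
 *      // One-to-one (TCP-style) socket: close() ends up in sctp_close(),
 *      // shutdown() in sctp_shutdown(), and so on.
 *      int fd_1to1 = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 */
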
#if IS_ENABLED(CONFIG_IPV6)

#include <net/transp_v6.h>
static void sctp_v6_destroy_sock(struct sock *sk)
{
        sctp_destroy_sock(sk);
        inet6_destroy_sock(sk);
}

struct proto sctpv6_prot = {
        .owner                  = THIS_MODULE,
        .close                  = sctp_close,
        .disconnect             = sctp_disconnect,
        .accept                 = sctp_accept,
        .ioctl                  = sctp_ioctl,
        .init                   = sctp_init_sock,
        .destroy                = sctp_v6_destroy_sock,
        .shutdown               = sctp_shutdown,
        .setsockopt             = sctp_setsockopt,
        .getsockopt             = sctp_getsockopt,
        .sendmsg                = sctp_sendmsg,
        .recvmsg                = sctp_recvmsg,
        .backlog_rcv            = sctp_backlog_rcv,
        .unhash                 = sctp_unhash,
        .get_port               = sctp_get_port,
        .obj_size               = sizeof(struct sctp6_sock),
        .useroffset             = offsetof(struct sctp6_sock, sctp.subscribe),
        .usersize               = offsetof(struct sctp6_sock, sctp.initmsg) -
                                  offsetof(struct sctp6_sock, sctp.subscribe) +
                                  sizeof_field(struct sctp6_sock, sctp.initmsg),
        .sysctl_mem             = sysctl_sctp_mem,
        .sysctl_rmem            = sysctl_sctp_rmem,
        .sysctl_wmem            = sysctl_sctp_wmem,
        .memory_pressure        = &sctp_memory_pressure,
        .enter_memory_pressure  = sctp_enter_memory_pressure,
        .memory_allocated       = &sctp_memory_allocated,
        .sockets_allocated      = &sctp_sockets_allocated,
};

#endif /* IS_ENABLED(CONFIG_IPV6) */