/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal     <narsi@refcode.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez  <inaky.gonzalez@intel.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>

#include <net/route.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			      struct sctp_association *assoc,
			      enum sctp_socket_type type);
static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}
/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = sk_wmem_alloc_get(asoc->base.sk);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else {
			amt = sk_stream_wspace(asoc->base.sk);
			if (amt < 0)
				amt = 0;
		}
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}
	return amt;
}
/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	skb_shinfo(chunk->skb)->destructor_arg = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}
static void sctp_clear_owner_w(struct sctp_chunk *chunk)
{
	skb_orphan(chunk->skb);
}

static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
				       void (*cb)(struct sctp_chunk *))
{
	struct sctp_outq *q = &asoc->outqueue;
	struct sctp_transport *t;
	struct sctp_chunk *chunk;

	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
			cb(chunk);

	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->sacked, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->out_chunk_list, list)
		cb(chunk);
}

static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
				 void (*cb)(struct sk_buff *, struct sock *))
{
	struct sk_buff *skb, *tmp;

	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
		cb(skb, sk);
}
/* Verify that this is a valid address.  */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr.  */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}
/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);

		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}
/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
	union sctp_addr *laddr = (union sctp_addr *)addr;
	struct sctp_transport *transport;

	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
		return NULL;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);
	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}
/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd      - the socket descriptor returned by socket().
 *   addr    - the address structure (struct sockaddr_in or struct
 *             sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);
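
/* For illustration only (not part of the kernel build): a minimal user-space
 * sketch of the UDP-style bind() call documented above.  The helper name,
 * port and wildcard address are arbitrary examples and error handling is
 * trimmed.
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int bind_sctp_any(void)
 *	{
 *		int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *		struct sockaddr_in addr;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sin_family = AF_INET;
 *		addr.sin_port = htons(5000);
 *		addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *
 *		return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 *	}
 */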
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof(struct sockaddr))
		return NULL;

	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
		return NULL;

	/* V4 mapped address are really of AF_INET family */
	if (addr->sa.sa_family == AF_INET6 &&
	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
	    !opt->pf->af_supported(AF_INET, opt))
		return NULL;

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}
/* Bind a local address either to an endpoint or to an association.  */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr)))
		return -EADDRINUSE;

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
				 SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}
/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF. Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}
/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}
/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    sizeof(saveaddr),
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				/* Clear the source and route cache */
				sctp_transport_dst_release(trans);
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				sctp_transport_route(trans, NULL,
				    sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}
/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that have been removed back */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}
/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		if (laddr == NULL)
			return -EINVAL;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
					transports) {
			sctp_transport_dst_release(transport);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}
/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind(). Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with so that no new association accepted will be
 * associated with those addresses. If the endpoint supports dynamic
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
 * may cause the endpoint to send the appropriate message to the peer to
 * change the peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
 * from userspace.
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	kaddrs = vmemdup_user(addrs, addrs_size);
	if (unlikely(IS_ERR(kaddrs)))
		return PTR_ERR(kaddrs);

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kvfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kvfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kvfree(kaddrs);

	return err;
}
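
/* For illustration only (not part of the kernel build): a minimal user-space
 * sketch of the sctp_bindx() wrapper from lksctp-tools described above,
 * adding one extra IPv4 address to an already bound one-to-many socket.
 * The helper name, address and port are arbitrary examples.
 *
 *	#include <string.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int add_local_addr(int sd, const char *ip, unsigned short port)
 *	{
 *		struct sockaddr_in addr;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sin_family = AF_INET;
 *		addr.sin_port = htons(port);
 *		inet_pton(AF_INET, ip, &addr.sin_addr);
 *
 *		return sctp_bindx(sd, (struct sockaddr *)&addr, 1,
 *				  SCTP_BINDX_ADD_ADDR);
 *	}
 */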
/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	enum sctp_scope scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;
	unsigned int f_flags = 0;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		struct sctp_af *af;

		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port <
				    inet_prot_sock(net) &&
				    !ns_capable(net->user_ns,
				    CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0)
				goto out_free;

		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0)
		goto out_free;

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	sp->pf->to_sk_daddr(sa_addr, sk);

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	if (assoc_id)
		*assoc_id = asoc->assoc_id;
	err = sctp_wait_for_connect(asoc, &timeo);
	/* Note: the asoc may be freed after the return of
	 * sctp_wait_for_connect.
	 */

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table; try to unhash it, just in case. It's a
		 * noop if it wasn't hashed, so we're safe.
		 */
		sctp_association_free(asoc);
	}
	return err;
}
/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *			sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association. On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code. The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed. Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached. The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent. This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association. It does not necessarily equal the set of addresses
 * the peer uses for the resulting association. If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking __sctp_connect(). This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	struct sockaddr *kaddrs;
	int err = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	kaddrs = vmemdup_user(addrs, addrs_size);
	if (unlikely(IS_ERR(kaddrs)))
		return PTR_ERR(kaddrs);

	err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);

	kvfree(kaddrs);

	return err;
}
/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}
/*
 * New interface for the API.  Since the API is done with a socket option,
 * to make it simple we feed back the association id as a return indication
 * to the call.  Error is always negative and association id is always
 * positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}
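
/* For illustration only (not part of the kernel build): a minimal user-space
 * sketch of the sctp_connectx() wrapper from lksctp-tools described above,
 * connecting to a peer that is reachable at two IPv4 addresses.  The helper
 * name, addresses and port are arbitrary examples.
 *
 *	#include <string.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int connect_multihomed(int sd)
 *	{
 *		struct sockaddr_in peers[2];
 *		sctp_assoc_t assoc_id;
 *
 *		memset(peers, 0, sizeof(peers));
 *		peers[0].sin_family = AF_INET;
 *		peers[0].sin_port = htons(5000);
 *		inet_pton(AF_INET, "192.0.2.1", &peers[0].sin_addr);
 *		peers[1] = peers[0];
 *		inet_pton(AF_INET, "198.51.100.1", &peers[1].sin_addr);
 *
 *		return sctp_connectx(sd, (struct sockaddr *)peers, 2,
 *				     &assoc_id);
 *	}
 */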
/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations. The only different part
 * is that we store the actual length of the address buffer into the
 * addrs_num structure member. That way we can re-use the existing
 * code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}
/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 * };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	sk->sk_shutdown = SHUTDOWN_MASK;
	inet_sk_set_state(sk, SCTP_SS_CLOSING);

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	bh_lock_sock_nested(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}
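
/* For illustration only (not part of the kernel build): a user-space sketch
 * of the SO_LINGER behaviour documented above - arming the option so that a
 * later close() on a TCP-style SCTP socket sends an ABORT instead of going
 * through the graceful SHUTDOWN sequence.  The helper name is an arbitrary
 * example.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int close_with_abort(int sd)
 *	{
 *		struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *
 *		if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) < 0)
 *			return -1;
 *		return close(sd);
 *	}
 */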
/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}
/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */

static int sctp_msghdr_parse(const struct msghdr *msg,
			     struct sctp_cmsgs *cmsgs);

static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc = NULL, *asoc = NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo;
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	struct sctp_cmsgs cmsgs = { NULL };
	enum sctp_scope scope;
	bool fill_sinfo_ttl = false, wait_connect = false;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;
	__u16 sinfo_flags = 0;
	long timeo;
	int err;

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
		 msg, msg_len, ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs.  */
	err = sctp_msghdr_parse(msg, &cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;

		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinit = cmsgs.init;
	if (cmsgs.sinfo != NULL) {
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
		default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
		default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
		default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
		default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;

		sinfo = &default_sinfo;
		fill_sinfo_ttl = true;
	} else {
		sinfo = cmsgs.srinfo;
	}
	/* Did the user specify SNDINFO/SNDRCVINFO? */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
		 msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	pr_debug("%s: about to look up association\n", __func__);

	lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.  */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);

		/* If we could not find a matching association on the
		 * endpoint, make sure that it is not a TCP-style
		 * socket that already has an association or there is
		 * no peeled-off association on another socket.
		 */
		if (!asoc &&
		    ((sctp_style(sk, TCP) &&
		      (sctp_sstate(sk, ESTABLISHED) ||
		       sctp_sstate(sk, CLOSING))) ||
		     sctp_endpoint_is_peeled_off(ep, &to))) {
			err = -EADDRNOTAVAIL;
			goto out_unlock;
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		pr_debug("%s: just looked up association:%p\n", __func__, asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			pr_debug("%s: shutting down association:%p\n",
				 __func__, asoc);

			sctp_primitive_SHUTDOWN(net, asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			pr_debug("%s: aborting association:%p\n",
				 __func__, asoc);

			sctp_primitive_ABORT(net, asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		pr_debug("%s: there is no association yet\n", __func__);

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || !sinit->sinit_num_ostreams) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
		 */
		if (!ep->base.bind_addr.port) {
			if (sctp_autobind(sk)) {
				err = -EAGAIN;
				goto out_unlock;
			}
		} else {
			/*
			 * If an unprivileged user inherits a one-to-many
			 * style socket with open associations on a privileged
			 * port, it MAY be permitted to accept new associations,
			 * but it SHOULD NOT be permitted to open new
			 * associations.
			 */
			if (ep->base.bind_addr.port < inet_prot_sock(net) &&
			    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
				err = -EACCES;
				goto out_unlock;
			}
		}

		scope = sctp_scope(&to);
		new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
		if (!new_asoc) {
			err = -ENOMEM;
			goto out_unlock;
		}
		asoc = new_asoc;
		err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
		if (err < 0) {
			err = -ENOMEM;
			goto out_free;
		}

		/* If the SCTP_INIT ancillary data is specified, set all
		 * the association init values accordingly.
		 */
		if (sinit) {
			if (sinit->sinit_num_ostreams) {
				__u16 outcnt = sinit->sinit_num_ostreams;

				asoc->c.sinit_num_ostreams = outcnt;
				/* outcnt has been changed, so re-init stream */
				err = sctp_stream_init(&asoc->stream, outcnt, 0,
						       GFP_KERNEL);
				if (err)
					goto out_free;
			}
			if (sinit->sinit_max_instreams) {
				asoc->c.sinit_max_instreams =
					sinit->sinit_max_instreams;
			}
			if (sinit->sinit_max_attempts) {
				asoc->max_init_attempts
					= sinit->sinit_max_attempts;
			}
			if (sinit->sinit_max_init_timeo) {
				asoc->max_init_timeo =
				 msecs_to_jiffies(sinit->sinit_max_init_timeo);
			}
		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	/* ASSERT: we have a valid association at this point.  */
	pr_debug("%s: we have a valid association\n", __func__);

	if (!sinfo) {
		/* If the user didn't specify SNDINFO/SNDRCVINFO, make up
		 * one with some defaults.
		 */
		memset(&default_sinfo, 0, sizeof(default_sinfo));
		default_sinfo.sinfo_stream = asoc->default_stream;
		default_sinfo.sinfo_flags = asoc->default_flags;
		default_sinfo.sinfo_ppid = asoc->default_ppid;
		default_sinfo.sinfo_context = asoc->default_context;
		default_sinfo.sinfo_timetolive = asoc->default_timetolive;
		default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);

		sinfo = &default_sinfo;
	} else if (fill_sinfo_ttl) {
		/* In case SNDINFO was specified, we still need to fill
		 * it with a default ttl from the assoc here.
		 */
		sinfo->sinfo_timetolive = asoc->default_timetolive;
	}

	/* API 7.1.7, the sndbuf size per association bounds the
	 * maximum size of data that can be sent in a single send call.
	 */
	if (msg_len > sk->sk_sndbuf) {
		err = -EMSGSIZE;
		goto out_free;
	}

	if (asoc->pmtu_pending)
		sctp_assoc_pending_pmtu(asoc);

	/* If fragmentation is disabled and the message length exceeds the
	 * association fragmentation point, return EMSGSIZE.  The I-D
	 * does not specify what this error is, but this looks like
	 * a great fit.
	 */
	if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
		err = -EMSGSIZE;
		goto out_free;
	}

	/* Check for invalid stream. */
	if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
		err = -EINVAL;
		goto out_free;
	}

	/* Allocate sctp_stream_out_ext if not already done */
	if (unlikely(!asoc->stream.out[sinfo->sinfo_stream].ext)) {
		err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream);
		if (err)
			goto out_free;
	}

	if (sctp_wspace(asoc) < msg_len)
		sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	if (!sctp_wspace(asoc)) {
		/* sk can be changed by peel off when waiting for buf. */
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
		if (err) {
			if (err == -ESRCH) {
				/* asoc is already dead. */
				new_asoc = NULL;
				err = -EPIPE;
			}
			goto out_free;
		}
	}

	/* If an address is passed with the sendto/sendmsg call, it is used
	 * to override the primary destination address in the TCP model, or
	 * when SCTP_ADDR_OVER flag is set in the UDP model.
	 */
	if ((sctp_style(sk, TCP) && msg_name) ||
	    (sinfo_flags & SCTP_ADDR_OVER)) {
		chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
		if (!chunk_tp) {
			err = -EINVAL;
			goto out_free;
		}
	} else
		chunk_tp = NULL;

	/* Auto-connect, if we aren't connected already. */
	if (sctp_state(asoc, CLOSED)) {
		err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
		if (err < 0)
			goto out_free;

		/* If stream interleave is enabled, wait_connect has to be
		 * done earlier than data enqueue, as it needs to make data
		 * or idata according to asoc->intl_enable which is set
		 * after connection is done.
		 */
		if (sctp_sk(asoc->base.sk)->strm_interleave) {
			timeo = sock_sndtimeo(sk, 0);
			err = sctp_wait_for_connect(asoc, &timeo);
			if (err)
				goto out_unlock;
		} else {
			wait_connect = true;
		}

		pr_debug("%s: we associated primitively\n", __func__);
	}

	/* Break the message into multiple chunks of maximum size. */
	datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
	if (IS_ERR(datamsg)) {
		err = PTR_ERR(datamsg);
		goto out_free;
	}

	asoc->force_delay = !!(msg->msg_flags & MSG_MORE);

	/* Now send the (possibly) fragmented message. */
	list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
		sctp_chunk_hold(chunk);

		/* Do accounting for the write space.  */
		sctp_set_owner_w(chunk);

		chunk->transport = chunk_tp;
	}

	/* Send it to the lower layers.  Note:  all chunks
	 * must either fail or succeed.   The lower layer
	 * works that way today.  Keep it that way or this
	 * breaks.
	 */
	err = sctp_primitive_SEND(net, asoc, datamsg);
	/* Did the lower layer accept the chunk? */
	if (err) {
		sctp_datamsg_free(datamsg);
		goto out_free;
	}

	pr_debug("%s: we sent primitively\n", __func__);

	sctp_datamsg_put(datamsg);
	err = msg_len;

	if (unlikely(wait_connect)) {
		timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
		sctp_wait_for_connect(asoc, &timeo);
	}

	/* If we are already past ASSOCIATE, the lower
	 * layers are responsible for association cleanup.
	 */
	goto out_unlock;

out_free:
	if (new_asoc)
		sctp_association_free(asoc);

out_unlock:
	release_sock(sk);

out_nounlock:
	return sctp_error(sk, msg_flags, err);

#if 0
do_sock_err:
	if (msg_len)
		err = msg_len;
	else
		err = sock_error(sk);
	goto out;
#endif /* 0 */
}
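
/* For illustration only (not part of the kernel build): a user-space sketch
 * of the UDP-style sendmsg() call documented above, carrying an SCTP_SNDRCV
 * ancillary data block so the message is queued on a specific stream.  The
 * helper name and the stream number are arbitrary examples.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	ssize_t send_on_stream(int sd, const void *buf, size_t len,
 *			       unsigned short stream)
 *	{
 *		char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *		struct iovec iov = { (void *)buf, len };
 *		struct msghdr msg;
 *		struct cmsghdr *cmsg;
 *		struct sctp_sndrcvinfo *sinfo;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		memset(cbuf, 0, sizeof(cbuf));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = IPPROTO_SCTP;
 *		cmsg->cmsg_type = SCTP_SNDRCV;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *		sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *		sinfo->sinfo_stream = stream;
 *
 *		return sendmsg(sd, &msg, 0);
 *	}
 */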
/* This is an extended version of skb_pull() that removes the data from the
 * start of a skb even when data is spread across the list of skb's in the
 * frag_list. len specifies the total amount of data that needs to be removed.
 * when 'len' bytes could be removed from the skb, it returns 0.
 * If 'len' exceeds the total skb length, it returns the no. of bytes that
 * could not be removed.
 */
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
	struct sk_buff *list;
	int skb_len = skb_headlen(skb);
	int rlen;

	if (len <= skb_len) {
		__skb_pull(skb, len);
		return 0;
	}
	len -= skb_len;
	__skb_pull(skb, skb_len);

	skb_walk_frags(skb, list) {
		rlen = sctp_skb_pull(list, len);
		skb->len -= (len-rlen);
		skb->data_len -= (len-rlen);

		if (!rlen)
			return 0;

		len = rlen;
	}

	return len;
}
/* API 3.1.3  recvmsg() - UDP Style Syntax
 *
 *  ssize_t recvmsg(int socket, struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 */
static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			int noblock, int flags, int *addr_len)
{
	struct sctp_ulpevent *event = NULL;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff *skb, *head_skb;
	int copied;
	int err = 0;
	int skb_len;

	pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
		 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
		 addr_len);

	lock_sock(sk);

	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
	    !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
		err = -ENOTCONN;
		goto out;
	}

	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Get the total length of the skb including any skb's in the
	 * frag_list.
	 */
	skb_len = skb->len;

	copied = skb_len;
	if (copied > len)
		copied = len;

	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	event = sctp_skb2event(skb);

	if (err)
		goto out_free;

	if (event->chunk && event->chunk->head_skb)
		head_skb = event->chunk->head_skb;
	else
		head_skb = skb;
	sock_recv_ts_and_drops(msg, sk, head_skb);
	if (sctp_ulpevent_is_notification(event)) {
		msg->msg_flags |= MSG_NOTIFICATION;
		sp->pf->event_msgname(event, msg->msg_name, addr_len);
	} else {
		sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
	}

	/* Check if we allow SCTP_NXTINFO. */
	if (sp->recvnxtinfo)
		sctp_ulpevent_read_nxtinfo(event, msg, sk);
	/* Check if we allow SCTP_RCVINFO. */
	if (sp->recvrcvinfo)
		sctp_ulpevent_read_rcvinfo(event, msg);
	/* Check if we allow SCTP_SNDRCVINFO. */
	if (sp->subscribe.sctp_data_io_event)
		sctp_ulpevent_read_sndrcvinfo(event, msg);

	err = copied;

	/* If skb's length exceeds the user's buffer, update the skb and
	 * push it back to the receive_queue so that the next call to
	 * recvmsg() will return the remaining data. Don't set MSG_EOR.
	 */
	if (skb_len > copied) {
		msg->msg_flags &= ~MSG_EOR;
		if (flags & MSG_PEEK)
			goto out_free;
		sctp_skb_pull(skb, copied);
		skb_queue_head(&sk->sk_receive_queue, skb);

		/* When only partial message is copied to the user, increase
		 * rwnd by that amount. If all the data in the skb is read,
		 * rwnd is updated when the event is freed.
		 */
		if (!sctp_ulpevent_is_notification(event))
			sctp_assoc_rwnd_increase(event->asoc, copied);
		goto out;
	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
		   (event->msg_flags & MSG_EOR))
		msg->msg_flags |= MSG_EOR;
	else
		msg->msg_flags &= ~MSG_EOR;

out_free:
	if (flags & MSG_PEEK) {
		/* Release the skb reference acquired after peeking the skb in
		 * sctp_skb_recv_datagram().
		 */
		kfree_skb(skb);
	} else {
		/* Free the event which includes releasing the reference to
		 * the owner of the skb, freeing the skb and updating the
		 * rwnd.
		 */
		sctp_ulpevent_free(event);
	}
out:
	release_sock(sk);
	return err;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag.  If enabled no SCTP message
 * fragmentation will be performed.  Instead if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * instead an error will be indicated to the user.
 */
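/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and an
 * SCTP socket descriptor 'sd'): turning fragmentation off.
 *
 *	int on = 1;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *		       &on, sizeof(on)) < 0)
 *		perror("SCTP_DISABLE_FRAGMENTS");
 */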
static int sctp_setsockopt_disable_fragments(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;

	return 0;
}
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
				  unsigned int optlen)
{
	struct sctp_association *asoc;
	struct sctp_ulpevent *event;

	if (optlen > sizeof(struct sctp_event_subscribe))
		return -EINVAL;
	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
		return -EFAULT;

	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
	 * if there is no data to be sent or retransmit, the stack will
	 * immediately send up this notification.
	 */
	if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
				       &sctp_sk(sk)->subscribe)) {
		asoc = sctp_id2assoc(sk, 0);

		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
			event = sctp_ulpevent_make_sender_dry_event(asoc,
					GFP_USER | __GFP_NOWARN);
			if (!event)
				return -ENOMEM;

			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
		}
	}

	return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
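/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and a
 * one-to-many style SCTP socket descriptor 'sd'): close idle associations
 * after 60 seconds.
 *
 *	int secs = 60;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 */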
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
				     unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct net *net = sock_net(sk);

	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (optlen != sizeof(int))
		return -EINVAL;
	if (copy_from_user(&sp->autoclose, optval, optlen))
		return -EFAULT;

	if (sp->autoclose > net->sctp.max_autoclose)
		sp->autoclose = net->sctp.max_autoclose;

	return 0;
}
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 * };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for.  This value will apply to all addresses of an
 *                     association if the spp_address field is empty.  Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association.  The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE  - Enable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled.  Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified.  Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *                     heartbeat delay is to be set to the value of 0
 *                     milliseconds.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.  Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive.  Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack.  The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address.  Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack.  If the spp_address field is blank then
 *                     delayed sack is disabled for the entire association.  Note
 *                     also that this field is mutually exclusive to
 *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                     results.
 */
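/* Userspace sketch (not part of this file; assumes <netinet/sctp.h>, an SCTP
 * socket descriptor 'sd' and an association id 'assoc_id'): enable heartbeats
 * with a 5 second interval on every peer address of the association.  Leaving
 * spp_address zeroed means "all addresses".
 *
 *	struct sctp_paddrparams pp;
 *
 *	memset(&pp, 0, sizeof(pp));
 *	pp.spp_assoc_id   = assoc_id;
 *	pp.spp_hbinterval = 5000;		// milliseconds
 *	pp.spp_flags      = SPP_HB_ENABLE;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, sizeof(pp));
 */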
2416 static int sctp_apply_peer_addr_params(struct sctp_paddrparams
*params
,
2417 struct sctp_transport
*trans
,
2418 struct sctp_association
*asoc
,
2419 struct sctp_sock
*sp
,
2422 int sackdelay_change
)
2426 if (params
->spp_flags
& SPP_HB_DEMAND
&& trans
) {
2427 struct net
*net
= sock_net(trans
->asoc
->base
.sk
);
2429 error
= sctp_primitive_REQUESTHEARTBEAT(net
, trans
->asoc
, trans
);
2434 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
2435 * this field is ignored. Note also that a value of zero indicates
2436 * the current setting should be left unchanged.
2438 if (params
->spp_flags
& SPP_HB_ENABLE
) {
2440 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
2441 * set. This lets us use 0 value when this flag
2444 if (params
->spp_flags
& SPP_HB_TIME_IS_ZERO
)
2445 params
->spp_hbinterval
= 0;
2447 if (params
->spp_hbinterval
||
2448 (params
->spp_flags
& SPP_HB_TIME_IS_ZERO
)) {
2451 msecs_to_jiffies(params
->spp_hbinterval
);
2454 msecs_to_jiffies(params
->spp_hbinterval
);
2456 sp
->hbinterval
= params
->spp_hbinterval
;
2463 trans
->param_flags
=
2464 (trans
->param_flags
& ~SPP_HB
) | hb_change
;
2467 (asoc
->param_flags
& ~SPP_HB
) | hb_change
;
2470 (sp
->param_flags
& ~SPP_HB
) | hb_change
;
2474 /* When Path MTU discovery is disabled the value specified here will
2475 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
2476 * include the flag SPP_PMTUD_DISABLE for this field to have any
2479 if ((params
->spp_flags
& SPP_PMTUD_DISABLE
) && params
->spp_pathmtu
) {
2481 trans
->pathmtu
= params
->spp_pathmtu
;
2482 sctp_assoc_sync_pmtu(asoc
);
2484 asoc
->pathmtu
= params
->spp_pathmtu
;
2486 sp
->pathmtu
= params
->spp_pathmtu
;
2492 int update
= (trans
->param_flags
& SPP_PMTUD_DISABLE
) &&
2493 (params
->spp_flags
& SPP_PMTUD_ENABLE
);
2494 trans
->param_flags
=
2495 (trans
->param_flags
& ~SPP_PMTUD
) | pmtud_change
;
2497 sctp_transport_pmtu(trans
, sctp_opt2sk(sp
));
2498 sctp_assoc_sync_pmtu(asoc
);
2502 (asoc
->param_flags
& ~SPP_PMTUD
) | pmtud_change
;
2505 (sp
->param_flags
& ~SPP_PMTUD
) | pmtud_change
;
2509 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
2510 * value of this field is ignored. Note also that a value of zero
2511 * indicates the current setting should be left unchanged.
2513 if ((params
->spp_flags
& SPP_SACKDELAY_ENABLE
) && params
->spp_sackdelay
) {
2516 msecs_to_jiffies(params
->spp_sackdelay
);
2519 msecs_to_jiffies(params
->spp_sackdelay
);
2521 sp
->sackdelay
= params
->spp_sackdelay
;
2525 if (sackdelay_change
) {
2527 trans
->param_flags
=
2528 (trans
->param_flags
& ~SPP_SACKDELAY
) |
2532 (asoc
->param_flags
& ~SPP_SACKDELAY
) |
2536 (sp
->param_flags
& ~SPP_SACKDELAY
) |
2541 /* Note that a value of zero indicates the current setting should be
2544 if (params
->spp_pathmaxrxt
) {
2546 trans
->pathmaxrxt
= params
->spp_pathmaxrxt
;
2548 asoc
->pathmaxrxt
= params
->spp_pathmaxrxt
;
2550 sp
->pathmaxrxt
= params
->spp_pathmaxrxt
;
2557 static int sctp_setsockopt_peer_addr_params(struct sock
*sk
,
2558 char __user
*optval
,
2559 unsigned int optlen
)
2561 struct sctp_paddrparams params
;
2562 struct sctp_transport
*trans
= NULL
;
2563 struct sctp_association
*asoc
= NULL
;
2564 struct sctp_sock
*sp
= sctp_sk(sk
);
2566 int hb_change
, pmtud_change
, sackdelay_change
;
2568 if (optlen
!= sizeof(struct sctp_paddrparams
))
2571 if (copy_from_user(¶ms
, optval
, optlen
))
2574 /* Validate flags and value parameters. */
2575 hb_change
= params
.spp_flags
& SPP_HB
;
2576 pmtud_change
= params
.spp_flags
& SPP_PMTUD
;
2577 sackdelay_change
= params
.spp_flags
& SPP_SACKDELAY
;
2579 if (hb_change
== SPP_HB
||
2580 pmtud_change
== SPP_PMTUD
||
2581 sackdelay_change
== SPP_SACKDELAY
||
2582 params
.spp_sackdelay
> 500 ||
2583 (params
.spp_pathmtu
&&
2584 params
.spp_pathmtu
< SCTP_DEFAULT_MINSEGMENT
))
2587 /* If an address other than INADDR_ANY is specified, and
2588 * no transport is found, then the request is invalid.
2590 if (!sctp_is_any(sk
, (union sctp_addr
*)¶ms
.spp_address
)) {
2591 trans
= sctp_addr_id2transport(sk
, ¶ms
.spp_address
,
2592 params
.spp_assoc_id
);
2597 /* Get association, if assoc_id != 0 and the socket is a one
2598 * to many style socket, and an association was not found, then
2599 * the id was invalid.
2601 asoc
= sctp_id2assoc(sk
, params
.spp_assoc_id
);
2602 if (!asoc
&& params
.spp_assoc_id
&& sctp_style(sk
, UDP
))
2605 /* Heartbeat demand can only be sent on a transport or
2606 * association, but not a socket.
2608 if (params
.spp_flags
& SPP_HB_DEMAND
&& !trans
&& !asoc
)
2611 /* Process parameters. */
2612 error
= sctp_apply_peer_addr_params(¶ms
, trans
, asoc
, sp
,
2613 hb_change
, pmtud_change
,
2619 /* If changes are for association, also apply parameters to each
2622 if (!trans
&& asoc
) {
2623 list_for_each_entry(trans
, &asoc
->peer
.transport_addr_list
,
2625 sctp_apply_peer_addr_params(¶ms
, trans
, asoc
, sp
,
2626 hb_change
, pmtud_change
,
static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
}

static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
}
/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option affects the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.  If
 * the assoc_id is 0, then this sets or gets the endpoint's default
 * values.  If the assoc_id field is non-zero, then the set or get
 * affects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t            sack_assoc_id;
 *     uint32_t                sack_delay;
 *     uint32_t                sack_freq;
 * };
 *
 * sack_assoc_id -  This parameter, indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoint's default value is changed (affecting future
 *    associations only).
 *
 * sack_delay -  This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq -  This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2, setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
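/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and an
 * SCTP socket descriptor 'sd'): request a 200 ms SACK delay for the endpoint
 * defaults (sack_assoc_id == 0).
 *
 *	struct sctp_sack_info si;
 *
 *	memset(&si, 0, sizeof(si));
 *	si.sack_delay = 200;	// milliseconds; must not exceed 500 here
 *	si.sack_freq  = 2;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));
 */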
2680 static int sctp_setsockopt_delayed_ack(struct sock
*sk
,
2681 char __user
*optval
, unsigned int optlen
)
2683 struct sctp_sack_info params
;
2684 struct sctp_transport
*trans
= NULL
;
2685 struct sctp_association
*asoc
= NULL
;
2686 struct sctp_sock
*sp
= sctp_sk(sk
);
2688 if (optlen
== sizeof(struct sctp_sack_info
)) {
2689 if (copy_from_user(¶ms
, optval
, optlen
))
2692 if (params
.sack_delay
== 0 && params
.sack_freq
== 0)
2694 } else if (optlen
== sizeof(struct sctp_assoc_value
)) {
2695 pr_warn_ratelimited(DEPRECATED
2697 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
2698 "Use struct sctp_sack_info instead\n",
2699 current
->comm
, task_pid_nr(current
));
2700 if (copy_from_user(¶ms
, optval
, optlen
))
2703 if (params
.sack_delay
== 0)
2704 params
.sack_freq
= 1;
2706 params
.sack_freq
= 0;
2710 /* Validate value parameter. */
2711 if (params
.sack_delay
> 500)
2714 /* Get association, if sack_assoc_id != 0 and the socket is a one
2715 * to many style socket, and an association was not found, then
2716 * the id was invalid.
2718 asoc
= sctp_id2assoc(sk
, params
.sack_assoc_id
);
2719 if (!asoc
&& params
.sack_assoc_id
&& sctp_style(sk
, UDP
))
2722 if (params
.sack_delay
) {
2725 msecs_to_jiffies(params
.sack_delay
);
2727 sctp_spp_sackdelay_enable(asoc
->param_flags
);
2729 sp
->sackdelay
= params
.sack_delay
;
2731 sctp_spp_sackdelay_enable(sp
->param_flags
);
2735 if (params
.sack_freq
== 1) {
2738 sctp_spp_sackdelay_disable(asoc
->param_flags
);
2741 sctp_spp_sackdelay_disable(sp
->param_flags
);
2743 } else if (params
.sack_freq
> 1) {
2745 asoc
->sackfreq
= params
.sack_freq
;
2747 sctp_spp_sackdelay_enable(asoc
->param_flags
);
2749 sp
->sackfreq
= params
.sack_freq
;
2751 sctp_spp_sackdelay_enable(sp
->param_flags
);
2755 /* If change is for association, also apply to each transport. */
2757 list_for_each_entry(trans
, &asoc
->peer
.transport_addr_list
,
2759 if (params
.sack_delay
) {
2761 msecs_to_jiffies(params
.sack_delay
);
2762 trans
->param_flags
=
2763 sctp_spp_sackdelay_enable(trans
->param_flags
);
2765 if (params
.sack_freq
== 1) {
2766 trans
->param_flags
=
2767 sctp_spp_sackdelay_disable(trans
->param_flags
);
2768 } else if (params
.sack_freq
> 1) {
2769 trans
->sackfreq
= params
.sack_freq
;
2770 trans
->param_flags
=
2771 sctp_spp_sackdelay_enable(trans
->param_flags
);
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are affected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
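/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and an
 * unconnected SCTP socket descriptor 'sd'): ask for up to 10 outbound and
 * inbound streams on future associations.
 *
 *	struct sctp_initmsg im;
 *
 *	memset(&im, 0, sizeof(im));
 *	im.sinit_num_ostreams  = 10;
 *	im.sinit_max_instreams = 10;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));
 */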
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	struct sctp_initmsg sinit;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof(struct sctp_initmsg))
		return -EINVAL;
	if (copy_from_user(&sinit, optval, optlen))
		return -EFAULT;

	if (sinit.sinit_num_ostreams)
		sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
	if (sinit.sinit_max_instreams)
		sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
	if (sinit.sinit_max_attempts)
		sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
	if (sinit.sinit_max_init_timeo)
		sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;

	return 0;
}
/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 * Applications that wish to use the sendto() system call may wish to
 * specify a default set of parameters that would normally be supplied
 * through the inclusion of ancillary data.  This socket option allows
 * such an application to set the default sctp_sndrcvinfo structure.
 * The application that wishes to use this socket option simply passes
 * to this call the sctp_sndrcvinfo structure defined in Section
 * 5.2.2.  The input parameters accepted by this call include
 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 * sinfo_timetolive.  The user must provide the sinfo_assoc_id field
 * to this call if the caller is using the UDP model.
 */
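/* Userspace sketch (not part of this file; assumes <netinet/sctp.h>, an SCTP
 * socket descriptor 'sd' and an association id 'assoc_id'): make stream 1 with
 * unordered delivery the default for that association.
 *
 *	struct sctp_sndrcvinfo info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.sinfo_stream   = 1;
 *	info.sinfo_flags    = SCTP_UNORDERED;
 *	info.sinfo_assoc_id = assoc_id;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		   &info, sizeof(info));
 */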
2826 static int sctp_setsockopt_default_send_param(struct sock
*sk
,
2827 char __user
*optval
,
2828 unsigned int optlen
)
2830 struct sctp_sock
*sp
= sctp_sk(sk
);
2831 struct sctp_association
*asoc
;
2832 struct sctp_sndrcvinfo info
;
2834 if (optlen
!= sizeof(info
))
2836 if (copy_from_user(&info
, optval
, optlen
))
2838 if (info
.sinfo_flags
&
2839 ~(SCTP_UNORDERED
| SCTP_ADDR_OVER
|
2840 SCTP_ABORT
| SCTP_EOF
))
2843 asoc
= sctp_id2assoc(sk
, info
.sinfo_assoc_id
);
2844 if (!asoc
&& info
.sinfo_assoc_id
&& sctp_style(sk
, UDP
))
2847 asoc
->default_stream
= info
.sinfo_stream
;
2848 asoc
->default_flags
= info
.sinfo_flags
;
2849 asoc
->default_ppid
= info
.sinfo_ppid
;
2850 asoc
->default_context
= info
.sinfo_context
;
2851 asoc
->default_timetolive
= info
.sinfo_timetolive
;
2853 sp
->default_stream
= info
.sinfo_stream
;
2854 sp
->default_flags
= info
.sinfo_flags
;
2855 sp
->default_ppid
= info
.sinfo_ppid
;
2856 sp
->default_context
= info
.sinfo_context
;
2857 sp
->default_timetolive
= info
.sinfo_timetolive
;
2863 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
2864 * (SCTP_DEFAULT_SNDINFO)
2866 static int sctp_setsockopt_default_sndinfo(struct sock
*sk
,
2867 char __user
*optval
,
2868 unsigned int optlen
)
2870 struct sctp_sock
*sp
= sctp_sk(sk
);
2871 struct sctp_association
*asoc
;
2872 struct sctp_sndinfo info
;
2874 if (optlen
!= sizeof(info
))
2876 if (copy_from_user(&info
, optval
, optlen
))
2878 if (info
.snd_flags
&
2879 ~(SCTP_UNORDERED
| SCTP_ADDR_OVER
|
2880 SCTP_ABORT
| SCTP_EOF
))
2883 asoc
= sctp_id2assoc(sk
, info
.snd_assoc_id
);
2884 if (!asoc
&& info
.snd_assoc_id
&& sctp_style(sk
, UDP
))
2887 asoc
->default_stream
= info
.snd_sid
;
2888 asoc
->default_flags
= info
.snd_flags
;
2889 asoc
->default_ppid
= info
.snd_ppid
;
2890 asoc
->default_context
= info
.snd_context
;
2892 sp
->default_stream
= info
.snd_sid
;
2893 sp
->default_flags
= info
.snd_flags
;
2894 sp
->default_ppid
= info
.snd_ppid
;
2895 sp
->default_context
= info
.snd_context
;
2901 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
2903 * Requests that the local SCTP stack use the enclosed peer address as
2904 * the association primary. The enclosed address must be one of the
2905 * association peer's addresses.
static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
					unsigned int optlen)
{
	struct sctp_prim prim;
	struct sctp_transport *trans;

	if (optlen != sizeof(struct sctp_prim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
		return -EFAULT;

	trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
	if (!trans)
		return -EINVAL;

	sctp_assoc_set_primary(trans->asoc, trans);

	return 0;
}
/*
 * 7.1.5 SCTP_NODELAY
 *
 * Turn on/off any Nagle-like algorithm.  This means that packets are
 * generally sent as soon as possible and no unnecessary delays are
 * introduced, at the cost of more packets in the network.  Expects an
 * integer boolean flag.
 */
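/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and an
 * SCTP socket descriptor 'sd'): disable the Nagle-like delay.
 *
 *	int on = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
 */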
static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
	return 0;
}
/*
 * 7.1.1 SCTP_RTOINFO
 *
 * The protocol parameters used to initialize and bound retransmission
 * timeout (RTO) are tunable.  The sctp_rtoinfo structure is used to access
 * and modify these parameters.
 * All parameters are time values, in milliseconds.  A value of 0, when
 * modifying the parameters, indicates that the current value should not
 * be changed.
 */
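/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and an
 * SCTP socket descriptor 'sd'): bound the RTO between 100 ms and 2 s for the
 * endpoint defaults (srto_assoc_id == 0).
 *
 *	struct sctp_rtoinfo rto;
 *
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_min = 100;	// milliseconds
 *	rto.srto_max = 2000;	// a 0 in any field leaves that value unchanged
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
 */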
2962 static int sctp_setsockopt_rtoinfo(struct sock
*sk
, char __user
*optval
, unsigned int optlen
)
2964 struct sctp_rtoinfo rtoinfo
;
2965 struct sctp_association
*asoc
;
2966 unsigned long rto_min
, rto_max
;
2967 struct sctp_sock
*sp
= sctp_sk(sk
);
2969 if (optlen
!= sizeof (struct sctp_rtoinfo
))
2972 if (copy_from_user(&rtoinfo
, optval
, optlen
))
2975 asoc
= sctp_id2assoc(sk
, rtoinfo
.srto_assoc_id
);
2977 /* Set the values to the specific association */
2978 if (!asoc
&& rtoinfo
.srto_assoc_id
&& sctp_style(sk
, UDP
))
2981 rto_max
= rtoinfo
.srto_max
;
2982 rto_min
= rtoinfo
.srto_min
;
2985 rto_max
= asoc
? msecs_to_jiffies(rto_max
) : rto_max
;
2987 rto_max
= asoc
? asoc
->rto_max
: sp
->rtoinfo
.srto_max
;
2990 rto_min
= asoc
? msecs_to_jiffies(rto_min
) : rto_min
;
2992 rto_min
= asoc
? asoc
->rto_min
: sp
->rtoinfo
.srto_min
;
2994 if (rto_min
> rto_max
)
2998 if (rtoinfo
.srto_initial
!= 0)
3000 msecs_to_jiffies(rtoinfo
.srto_initial
);
3001 asoc
->rto_max
= rto_max
;
3002 asoc
->rto_min
= rto_min
;
3004 /* If there is no association or the association-id = 0
3005 * set the values to the endpoint.
3007 if (rtoinfo
.srto_initial
!= 0)
3008 sp
->rtoinfo
.srto_initial
= rtoinfo
.srto_initial
;
3009 sp
->rtoinfo
.srto_max
= rto_max
;
3010 sp
->rtoinfo
.srto_min
= rto_min
;
/*
 * 7.1.2 SCTP_ASSOCINFO
 *
 * This option is used to tune the maximum retransmission attempts
 * of the association.
 * Returns an error if the new association retransmission value is
 * greater than the sum of the retransmission values of the peer's
 * addresses.  See [SCTP] for more information.
 */
3027 static int sctp_setsockopt_associnfo(struct sock
*sk
, char __user
*optval
, unsigned int optlen
)
3030 struct sctp_assocparams assocparams
;
3031 struct sctp_association
*asoc
;
3033 if (optlen
!= sizeof(struct sctp_assocparams
))
3035 if (copy_from_user(&assocparams
, optval
, optlen
))
3038 asoc
= sctp_id2assoc(sk
, assocparams
.sasoc_assoc_id
);
3040 if (!asoc
&& assocparams
.sasoc_assoc_id
&& sctp_style(sk
, UDP
))
3043 /* Set the values to the specific association */
3045 if (assocparams
.sasoc_asocmaxrxt
!= 0) {
3048 struct sctp_transport
*peer_addr
;
3050 list_for_each_entry(peer_addr
, &asoc
->peer
.transport_addr_list
,
3052 path_sum
+= peer_addr
->pathmaxrxt
;
3056 /* Only validate asocmaxrxt if we have more than
3057 * one path/transport. We do this because path
3058 * retransmissions are only counted when we have more
3062 assocparams
.sasoc_asocmaxrxt
> path_sum
)
3065 asoc
->max_retrans
= assocparams
.sasoc_asocmaxrxt
;
3068 if (assocparams
.sasoc_cookie_life
!= 0)
3069 asoc
->cookie_life
= ms_to_ktime(assocparams
.sasoc_cookie_life
);
3071 /* Set the values to the endpoint */
3072 struct sctp_sock
*sp
= sctp_sk(sk
);
3074 if (assocparams
.sasoc_asocmaxrxt
!= 0)
3075 sp
->assocparams
.sasoc_asocmaxrxt
=
3076 assocparams
.sasoc_asocmaxrxt
;
3077 if (assocparams
.sasoc_cookie_life
!= 0)
3078 sp
->assocparams
.sasoc_cookie_life
=
3079 assocparams
.sasoc_cookie_life
;
3085 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
3087 * This socket option is a boolean flag which turns on or off mapped V4
3088 * addresses. If this option is turned on and the socket is type
3089 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
3090 * If this option is turned off, then no mapping will be done of V4
3091 * addresses and a user will receive both PF_INET6 and PF_INET type
3092 * addresses on the socket.
static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval,
				    unsigned int optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;
	if (val)
		sp->v4mapped = 1;
	else
		sp->v4mapped = 0;

	return 0;
}
/*
 * 8.1.16.  Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
 * This option will get or set the maximum size to put in any outgoing
 * SCTP DATA chunk.  If a message is larger than this size it will be
 * fragmented by SCTP into the specified size.  Note that the underlying
 * SCTP implementation may fragment into smaller sized chunks when the
 * PMTU of the underlying association is smaller than the value set by
 * the user.  The default value for this option is '0' which indicates
 * the user is NOT limiting fragmentation and only the PMTU will affect
 * SCTP's choice of DATA chunk size.  Note also that values set larger
 * than the maximum size of an IP datagram will effectively let SCTP
 * control fragmentation (i.e. the same as setting this option to 0).
 *
 * The following structure is used to access and modify this parameter:
 *
 * struct sctp_assoc_value {
 *   sctp_assoc_t assoc_id;
 *   uint32_t assoc_value;
 * };
 *
 * assoc_id:  This parameter is ignored for one-to-one style sockets.
 *    For one-to-many style sockets this parameter indicates which
 *    association the user is performing an action upon.  Note that if
 *    this field's value is zero then the endpoint's default value is
 *    changed (affecting future associations only).
 * assoc_value:  This parameter specifies the maximum size in bytes.
 */
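/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and an
 * SCTP socket descriptor 'sd'): cap outgoing DATA chunks at 1200 bytes for the
 * endpoint defaults (assoc_id == 0).
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_value = 1200;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 */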
3138 static int sctp_setsockopt_maxseg(struct sock
*sk
, char __user
*optval
, unsigned int optlen
)
3140 struct sctp_sock
*sp
= sctp_sk(sk
);
3141 struct sctp_assoc_value params
;
3142 struct sctp_association
*asoc
;
3145 if (optlen
== sizeof(int)) {
3146 pr_warn_ratelimited(DEPRECATED
3148 "Use of int in maxseg socket option.\n"
3149 "Use struct sctp_assoc_value instead\n",
3150 current
->comm
, task_pid_nr(current
));
3151 if (copy_from_user(&val
, optval
, optlen
))
3153 params
.assoc_id
= 0;
3154 } else if (optlen
== sizeof(struct sctp_assoc_value
)) {
3155 if (copy_from_user(¶ms
, optval
, optlen
))
3157 val
= params
.assoc_value
;
3163 int min_len
, max_len
;
3165 min_len
= SCTP_DEFAULT_MINSEGMENT
- sp
->pf
->af
->net_header_len
;
3166 min_len
-= sizeof(struct sctphdr
) +
3167 sizeof(struct sctp_data_chunk
);
3169 max_len
= SCTP_MAX_CHUNK_LEN
- sizeof(struct sctp_data_chunk
);
3171 if (val
< min_len
|| val
> max_len
)
3175 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
3178 val
= asoc
->pathmtu
- sp
->pf
->af
->net_header_len
;
3179 val
-= sizeof(struct sctphdr
) +
3180 sctp_datachk_len(&asoc
->stream
);
3182 asoc
->user_frag
= val
;
3183 asoc
->frag_point
= sctp_frag_point(asoc
, asoc
->pathmtu
);
3185 if (params
.assoc_id
&& sctp_style(sk
, UDP
))
3187 sp
->user_frag
= val
;
3195 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
3197 * Requests that the peer mark the enclosed address as the association
3198 * primary. The enclosed address must be one of the association's
3199 * locally bound addresses. The following structure is used to make a
3200 * set primary request:
3202 static int sctp_setsockopt_peer_primary_addr(struct sock
*sk
, char __user
*optval
,
3203 unsigned int optlen
)
3205 struct net
*net
= sock_net(sk
);
3206 struct sctp_sock
*sp
;
3207 struct sctp_association
*asoc
= NULL
;
3208 struct sctp_setpeerprim prim
;
3209 struct sctp_chunk
*chunk
;
3215 if (!net
->sctp
.addip_enable
)
3218 if (optlen
!= sizeof(struct sctp_setpeerprim
))
3221 if (copy_from_user(&prim
, optval
, optlen
))
3224 asoc
= sctp_id2assoc(sk
, prim
.sspp_assoc_id
);
3228 if (!asoc
->peer
.asconf_capable
)
3231 if (asoc
->peer
.addip_disabled_mask
& SCTP_PARAM_SET_PRIMARY
)
3234 if (!sctp_state(asoc
, ESTABLISHED
))
3237 af
= sctp_get_af_specific(prim
.sspp_addr
.ss_family
);
3241 if (!af
->addr_valid((union sctp_addr
*)&prim
.sspp_addr
, sp
, NULL
))
3242 return -EADDRNOTAVAIL
;
3244 if (!sctp_assoc_lookup_laddr(asoc
, (union sctp_addr
*)&prim
.sspp_addr
))
3245 return -EADDRNOTAVAIL
;
3247 /* Create an ASCONF chunk with SET_PRIMARY parameter */
3248 chunk
= sctp_make_asconf_set_prim(asoc
,
3249 (union sctp_addr
*)&prim
.sspp_addr
);
3253 err
= sctp_send_asconf(asoc
, chunk
);
3255 pr_debug("%s: we set peer primary addr primitively\n", __func__
);
static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
					    unsigned int optlen)
{
	struct sctp_setadaptation adaptation;

	if (optlen != sizeof(struct sctp_setadaptation))
		return -EINVAL;
	if (copy_from_user(&adaptation, optval, optlen))
		return -EFAULT;

	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;

	return 0;
}
/*
 * 7.1.29.  Set or Get the default context (SCTP_CONTEXT)
 *
 * The context field in the sctp_sndrcvinfo structure is normally only
 * used when a failed message is retrieved holding the value that was
 * sent down on the actual send call.  This option allows the setting of
 * a default context on an association basis that will be received on
 * reading messages from the peer.  This is especially helpful in the
 * one-2-many model for an application to keep some reference to an
 * internal state machine that is processing messages on the
 * association.  Note that the setting of this value only affects
 * received messages from the peer and does not affect the value that is
 * saved with outbound messages.
 */
3289 static int sctp_setsockopt_context(struct sock
*sk
, char __user
*optval
,
3290 unsigned int optlen
)
3292 struct sctp_assoc_value params
;
3293 struct sctp_sock
*sp
;
3294 struct sctp_association
*asoc
;
3296 if (optlen
!= sizeof(struct sctp_assoc_value
))
3298 if (copy_from_user(¶ms
, optval
, optlen
))
3303 if (params
.assoc_id
!= 0) {
3304 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
3307 asoc
->default_rcv_context
= params
.assoc_value
;
3309 sp
->default_rcv_context
= params
.assoc_value
;
/*
 * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 *
 * This option will at a minimum specify if the implementation is doing
 * fragmented interleave.  Fragmented interleave, for a one to many
 * socket, is when subsequent calls to receive a message may return
 * parts of messages from different associations.  Some implementations
 * may allow you to turn this value on or off.  If so, when turned off,
 * no fragment interleave will occur (which will cause a head of line
 * blocking amongst multiple associations sharing the same one to many
 * socket).  When this option is turned on, then each receive call may
 * come from a different association (thus the user must receive data
 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
 * association each receive belongs to).
 *
 * This option takes a boolean value.  A non-zero value indicates that
 * fragmented interleave is on.  A value of zero indicates that
 * fragmented interleave is off.
 *
 * Note that it is important that an implementation that allows this
 * option to be turned on, have it off by default.  Otherwise an unaware
 * application using the one to many model may become confused and act
 * incorrectly.
 */
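/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and a
 * one-to-many style SCTP socket descriptor 'sd'): turn fragment interleave on;
 * the receiver must then use sctp_recvmsg() or the rcvinfo ancillary data to
 * tell associations apart.
 *
 *	int on = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
 *		   &on, sizeof(on));
 */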
static int sctp_setsockopt_fragment_interleave(struct sock *sk,
					       char __user *optval,
					       unsigned int optlen)
{
	int val;

	if (optlen != sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->frag_interleave = !!val;

	if (!sctp_sk(sk)->frag_interleave)
		sctp_sk(sk)->strm_interleave = 0;

	return 0;
}
/*
 * 8.1.21.  Set or Get the SCTP Partial Delivery Point
 *       (SCTP_PARTIAL_DELIVERY_POINT)
 *
 * This option will set or get the SCTP partial delivery point.  This
 * point is the size of a message where the partial delivery API will be
 * invoked to help free up rwnd space for the peer.  Setting this to a
 * lower value will cause partial deliveries to happen more often.  The
 * call's argument is an integer that sets or gets the partial delivery
 * point.  Note also that the call will fail if the user attempts to set
 * this value larger than the socket receive buffer size.
 *
 * Note that any single message having a length smaller than or equal to
 * the SCTP partial delivery point will be delivered in one single read
 * call as long as the user provided buffer is large enough to hold the
 * message.
 */
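/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and an
 * SCTP socket descriptor 'sd'): ask for partial delivery once a message
 * reaches 64 KB; the value must stay below half of SO_RCVBUF or the call
 * fails with EINVAL.
 *
 *	uint32_t point = 64 * 1024;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
 *		   &point, sizeof(point));
 */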
static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
						  char __user *optval,
						  unsigned int optlen)
{
	u32 val;

	if (optlen != sizeof(u32))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	/* Note: We double the receive buffer from what the user sets
	 * it to be, also initial rwnd is based on rcvbuf/2.
	 */
	if (val > (sk->sk_rcvbuf >> 1))
		return -EINVAL;

	sctp_sk(sk)->pd_point = val;

	return 0; /* is this the right error code? */
}
3398 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
3400 * This option will allow a user to change the maximum burst of packets
3401 * that can be emitted by this association. Note that the default value
3402 * is 4, and some implementations may restrict this setting so that it
3403 * can only be lowered.
3405 * NOTE: This text doesn't seem right. Do this on a socket basis with
3406 * future associations inheriting the socket value.
3408 static int sctp_setsockopt_maxburst(struct sock
*sk
,
3409 char __user
*optval
,
3410 unsigned int optlen
)
3412 struct sctp_assoc_value params
;
3413 struct sctp_sock
*sp
;
3414 struct sctp_association
*asoc
;
3418 if (optlen
== sizeof(int)) {
3419 pr_warn_ratelimited(DEPRECATED
3421 "Use of int in max_burst socket option deprecated.\n"
3422 "Use struct sctp_assoc_value instead\n",
3423 current
->comm
, task_pid_nr(current
));
3424 if (copy_from_user(&val
, optval
, optlen
))
3426 } else if (optlen
== sizeof(struct sctp_assoc_value
)) {
3427 if (copy_from_user(¶ms
, optval
, optlen
))
3429 val
= params
.assoc_value
;
3430 assoc_id
= params
.assoc_id
;
3436 if (assoc_id
!= 0) {
3437 asoc
= sctp_id2assoc(sk
, assoc_id
);
3440 asoc
->max_burst
= val
;
3442 sp
->max_burst
= val
;
/*
 * 7.1.18.  Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
 *
 * This set option adds a chunk type that the user is requesting to be
 * received only in an authenticated way.  Changes to the list of chunks
 * will only affect future associations on the socket.
 */
static int sctp_setsockopt_auth_chunk(struct sock *sk,
				      char __user *optval,
				      unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunk val;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authchunk))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	switch (val.sauth_chunk) {
	case SCTP_CID_INIT:
	case SCTP_CID_INIT_ACK:
	case SCTP_CID_SHUTDOWN_COMPLETE:
	case SCTP_CID_AUTH:
		return -EINVAL;
	}

	/* add this chunk id to the endpoint */
	return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
}
3482 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
3484 * This option gets or sets the list of HMAC algorithms that the local
3485 * endpoint requires the peer to use.
3487 static int sctp_setsockopt_hmac_ident(struct sock
*sk
,
3488 char __user
*optval
,
3489 unsigned int optlen
)
3491 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
3492 struct sctp_hmacalgo
*hmacs
;
3496 if (!ep
->auth_enable
)
3499 if (optlen
< sizeof(struct sctp_hmacalgo
))
3501 optlen
= min_t(unsigned int, optlen
, sizeof(struct sctp_hmacalgo
) +
3502 SCTP_AUTH_NUM_HMACS
* sizeof(u16
));
3504 hmacs
= memdup_user(optval
, optlen
);
3506 return PTR_ERR(hmacs
);
3508 idents
= hmacs
->shmac_num_idents
;
3509 if (idents
== 0 || idents
> SCTP_AUTH_NUM_HMACS
||
3510 (idents
* sizeof(u16
)) > (optlen
- sizeof(struct sctp_hmacalgo
))) {
3515 err
= sctp_auth_ep_set_hmacs(ep
, hmacs
);
3522 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
3524 * This option will set a shared secret key which is used to build an
3525 * association shared key.
3527 static int sctp_setsockopt_auth_key(struct sock
*sk
,
3528 char __user
*optval
,
3529 unsigned int optlen
)
3531 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
3532 struct sctp_authkey
*authkey
;
3533 struct sctp_association
*asoc
;
3536 if (!ep
->auth_enable
)
3539 if (optlen
<= sizeof(struct sctp_authkey
))
3541 /* authkey->sca_keylength is u16, so optlen can't be bigger than
3544 optlen
= min_t(unsigned int, optlen
, USHRT_MAX
+
3545 sizeof(struct sctp_authkey
));
3547 authkey
= memdup_user(optval
, optlen
);
3548 if (IS_ERR(authkey
))
3549 return PTR_ERR(authkey
);
3551 if (authkey
->sca_keylength
> optlen
- sizeof(struct sctp_authkey
)) {
3556 asoc
= sctp_id2assoc(sk
, authkey
->sca_assoc_id
);
3557 if (!asoc
&& authkey
->sca_assoc_id
&& sctp_style(sk
, UDP
)) {
3562 ret
= sctp_auth_set_key(ep
, asoc
, authkey
);
3569 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
3571 * This option will get or set the active shared key to be used to build
3572 * the association shared key.
static int sctp_setsockopt_active_key(struct sock *sk,
				      char __user *optval,
				      unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
}
3598 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
3600 * This set option will delete a shared secret key from use.
static int sctp_setsockopt_del_key(struct sock *sk,
				   char __user *optval,
				   unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
}
/*
 * 8.1.23 SCTP_AUTO_ASCONF
 *
 * This option will enable or disable the use of the automatic generation of
 * ASCONF chunks to add and delete addresses to an existing association.  Note
 * that this option has two caveats namely: a) it only affects sockets that
 * are bound to all addresses available to the SCTP stack, and b) the system
 * administrator may have an overriding control that turns the ASCONF feature
 * off no matter what setting the socket option may have.
 * This option expects an integer boolean flag, where a non-zero value turns on
 * the option, and a zero value turns off the option.
 * Note: in this implementation, the socket option overrides the default set
 * by the sysctl, as the FreeBSD implementation does.
 */
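/* Userspace sketch (not part of this file; assumes <netinet/sctp.h> and an
 * SCTP socket descriptor 'sd' bound to all addresses, e.g. INADDR_ANY): enable
 * automatic ASCONF.  On a socket bound to a specific address the kernel
 * rejects this with EINVAL.
 *
 *	int on = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on, sizeof(on));
 */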
static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
				       unsigned int optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;
	if (!sctp_is_ep_boundall(sk) && val)
		return -EINVAL;
	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
		return 0;

	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
	if (val == 0 && sp->do_auto_asconf) {
		list_del(&sp->auto_asconf_list);
		sp->do_auto_asconf = 0;
	} else if (val && !sp->do_auto_asconf) {
		list_add_tail(&sp->auto_asconf_list,
			      &sock_net(sk)->sctp.auto_asconf_splist);
		sp->do_auto_asconf = 1;
	}
	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
	return 0;
}
3669 * SCTP_PEER_ADDR_THLDS
3671 * This option allows us to alter the partially failed threshold for one or all
3672 * transports in an association. See Section 6.1 of:
3673 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
3675 static int sctp_setsockopt_paddr_thresholds(struct sock
*sk
,
3676 char __user
*optval
,
3677 unsigned int optlen
)
3679 struct sctp_paddrthlds val
;
3680 struct sctp_transport
*trans
;
3681 struct sctp_association
*asoc
;
3683 if (optlen
< sizeof(struct sctp_paddrthlds
))
3685 if (copy_from_user(&val
, (struct sctp_paddrthlds __user
*)optval
,
3686 sizeof(struct sctp_paddrthlds
)))
3690 if (sctp_is_any(sk
, (const union sctp_addr
*)&val
.spt_address
)) {
3691 asoc
= sctp_id2assoc(sk
, val
.spt_assoc_id
);
3694 list_for_each_entry(trans
, &asoc
->peer
.transport_addr_list
,
3696 if (val
.spt_pathmaxrxt
)
3697 trans
->pathmaxrxt
= val
.spt_pathmaxrxt
;
3698 trans
->pf_retrans
= val
.spt_pathpfthld
;
3701 if (val
.spt_pathmaxrxt
)
3702 asoc
->pathmaxrxt
= val
.spt_pathmaxrxt
;
3703 asoc
->pf_retrans
= val
.spt_pathpfthld
;
3705 trans
= sctp_addr_id2transport(sk
, &val
.spt_address
,
3710 if (val
.spt_pathmaxrxt
)
3711 trans
->pathmaxrxt
= val
.spt_pathmaxrxt
;
3712 trans
->pf_retrans
= val
.spt_pathpfthld
;
static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;

	return 0;
}

static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;

	return 0;
}
3750 static int sctp_setsockopt_pr_supported(struct sock
*sk
,
3751 char __user
*optval
,
3752 unsigned int optlen
)
3754 struct sctp_assoc_value params
;
3755 struct sctp_association
*asoc
;
3756 int retval
= -EINVAL
;
3758 if (optlen
!= sizeof(params
))
3761 if (copy_from_user(¶ms
, optval
, optlen
)) {
3766 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
3768 asoc
->prsctp_enable
= !!params
.assoc_value
;
3769 } else if (!params
.assoc_id
) {
3770 struct sctp_sock
*sp
= sctp_sk(sk
);
3772 sp
->ep
->prsctp_enable
= !!params
.assoc_value
;
3783 static int sctp_setsockopt_default_prinfo(struct sock
*sk
,
3784 char __user
*optval
,
3785 unsigned int optlen
)
3787 struct sctp_default_prinfo info
;
3788 struct sctp_association
*asoc
;
3789 int retval
= -EINVAL
;
3791 if (optlen
!= sizeof(info
))
3794 if (copy_from_user(&info
, optval
, sizeof(info
))) {
3799 if (info
.pr_policy
& ~SCTP_PR_SCTP_MASK
)
3802 if (info
.pr_policy
== SCTP_PR_SCTP_NONE
)
3805 asoc
= sctp_id2assoc(sk
, info
.pr_assoc_id
);
3807 SCTP_PR_SET_POLICY(asoc
->default_flags
, info
.pr_policy
);
3808 asoc
->default_timetolive
= info
.pr_value
;
3809 } else if (!info
.pr_assoc_id
) {
3810 struct sctp_sock
*sp
= sctp_sk(sk
);
3812 SCTP_PR_SET_POLICY(sp
->default_flags
, info
.pr_policy
);
3813 sp
->default_timetolive
= info
.pr_value
;
3824 static int sctp_setsockopt_reconfig_supported(struct sock
*sk
,
3825 char __user
*optval
,
3826 unsigned int optlen
)
3828 struct sctp_assoc_value params
;
3829 struct sctp_association
*asoc
;
3830 int retval
= -EINVAL
;
3832 if (optlen
!= sizeof(params
))
3835 if (copy_from_user(¶ms
, optval
, optlen
)) {
3840 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
3842 asoc
->reconf_enable
= !!params
.assoc_value
;
3843 } else if (!params
.assoc_id
) {
3844 struct sctp_sock
*sp
= sctp_sk(sk
);
3846 sp
->ep
->reconf_enable
= !!params
.assoc_value
;
3857 static int sctp_setsockopt_enable_strreset(struct sock
*sk
,
3858 char __user
*optval
,
3859 unsigned int optlen
)
3861 struct sctp_assoc_value params
;
3862 struct sctp_association
*asoc
;
3863 int retval
= -EINVAL
;
3865 if (optlen
!= sizeof(params
))
3868 if (copy_from_user(¶ms
, optval
, optlen
)) {
3873 if (params
.assoc_value
& (~SCTP_ENABLE_STRRESET_MASK
))
3876 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
3878 asoc
->strreset_enable
= params
.assoc_value
;
3879 } else if (!params
.assoc_id
) {
3880 struct sctp_sock
*sp
= sctp_sk(sk
);
3882 sp
->ep
->strreset_enable
= params
.assoc_value
;
3893 static int sctp_setsockopt_reset_streams(struct sock
*sk
,
3894 char __user
*optval
,
3895 unsigned int optlen
)
3897 struct sctp_reset_streams
*params
;
3898 struct sctp_association
*asoc
;
3899 int retval
= -EINVAL
;
3901 if (optlen
< sizeof(*params
))
3903 /* srs_number_streams is u16, so optlen can't be bigger than this. */
3904 optlen
= min_t(unsigned int, optlen
, USHRT_MAX
+
3905 sizeof(__u16
) * sizeof(*params
));
3907 params
= memdup_user(optval
, optlen
);
3909 return PTR_ERR(params
);
3911 if (params
->srs_number_streams
* sizeof(__u16
) >
3912 optlen
- sizeof(*params
))
3915 asoc
= sctp_id2assoc(sk
, params
->srs_assoc_id
);
3919 retval
= sctp_send_reset_streams(asoc
, params
);
3926 static int sctp_setsockopt_reset_assoc(struct sock
*sk
,
3927 char __user
*optval
,
3928 unsigned int optlen
)
3930 struct sctp_association
*asoc
;
3931 sctp_assoc_t associd
;
3932 int retval
= -EINVAL
;
3934 if (optlen
!= sizeof(associd
))
3937 if (copy_from_user(&associd
, optval
, optlen
)) {
3942 asoc
= sctp_id2assoc(sk
, associd
);
3946 retval
= sctp_send_reset_assoc(asoc
);
3952 static int sctp_setsockopt_add_streams(struct sock
*sk
,
3953 char __user
*optval
,
3954 unsigned int optlen
)
3956 struct sctp_association
*asoc
;
3957 struct sctp_add_streams params
;
3958 int retval
= -EINVAL
;
3960 if (optlen
!= sizeof(params
))
3963 if (copy_from_user(¶ms
, optval
, optlen
)) {
3968 asoc
= sctp_id2assoc(sk
, params
.sas_assoc_id
);
3972 retval
= sctp_send_add_streams(asoc
, ¶ms
);
3978 static int sctp_setsockopt_scheduler(struct sock
*sk
,
3979 char __user
*optval
,
3980 unsigned int optlen
)
3982 struct sctp_association
*asoc
;
3983 struct sctp_assoc_value params
;
3984 int retval
= -EINVAL
;
3986 if (optlen
< sizeof(params
))
3989 optlen
= sizeof(params
);
3990 if (copy_from_user(¶ms
, optval
, optlen
)) {
3995 if (params
.assoc_value
> SCTP_SS_MAX
)
3998 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
4002 retval
= sctp_sched_set_sched(asoc
, params
.assoc_value
);
4008 static int sctp_setsockopt_scheduler_value(struct sock
*sk
,
4009 char __user
*optval
,
4010 unsigned int optlen
)
4012 struct sctp_association
*asoc
;
4013 struct sctp_stream_value params
;
4014 int retval
= -EINVAL
;
4016 if (optlen
< sizeof(params
))
4019 optlen
= sizeof(params
);
4020 if (copy_from_user(¶ms
, optval
, optlen
)) {
4025 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
4029 retval
= sctp_sched_set_value(asoc
, params
.stream_id
,
4030 params
.stream_value
, GFP_KERNEL
);
4036 static int sctp_setsockopt_interleaving_supported(struct sock
*sk
,
4037 char __user
*optval
,
4038 unsigned int optlen
)
4040 struct sctp_sock
*sp
= sctp_sk(sk
);
4041 struct net
*net
= sock_net(sk
);
4042 struct sctp_assoc_value params
;
4043 int retval
= -EINVAL
;
4045 if (optlen
< sizeof(params
))
4048 optlen
= sizeof(params
);
4049 if (copy_from_user(¶ms
, optval
, optlen
)) {
4054 if (params
.assoc_id
)
4057 if (!net
->sctp
.intl_enable
|| !sp
->frag_interleave
) {
4062 sp
->strm_interleave
= !!params
.assoc_value
;
/* API 6.2 setsockopt(), getsockopt()
 *
 * Applications use setsockopt() and getsockopt() to set or retrieve
 * socket options.  Socket options are used to change the default
 * behavior of socket calls.  They are described in Section 7.
 *
 * The syntax is:
 *
 *   ret = getsockopt(int sd, int level, int optname, void __user *optval,
 *                    int __user *optlen);
 *   ret = setsockopt(int sd, int level, int optname, const void __user *optval,
 *                    int optlen);
 *
 *   sd      - the socket descriptor.
 *   level   - set to IPPROTO_SCTP for all SCTP options.
 *   optname - the option name.
 *   optval  - the buffer to store the value of the option.
 *   optlen  - the size of the buffer.
 */
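/* Userspace sketch (not part of this file; assumes <netinet/sctp.h>, an SCTP
 * socket descriptor 'sd' and an association id 'assoc_id'): the generic
 * get/set pattern the SCTP_* options follow, shown here with SCTP_STATUS.
 *
 *	struct sctp_status st;
 *	socklen_t len = sizeof(st);
 *
 *	memset(&st, 0, sizeof(st));
 *	st.sstat_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
 *		;	// st.sstat_instrms / st.sstat_outstrms etc. are valid
 */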
4089 static int sctp_setsockopt(struct sock
*sk
, int level
, int optname
,
4090 char __user
*optval
, unsigned int optlen
)
4094 pr_debug("%s: sk:%p, optname:%d\n", __func__
, sk
, optname
);
4096 /* I can hardly begin to describe how wrong this is. This is
4097 * so broken as to be worse than useless. The API draft
4098 * REALLY is NOT helpful here... I am not convinced that the
4099 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
4100 * are at all well-founded.
4102 if (level
!= SOL_SCTP
) {
4103 struct sctp_af
*af
= sctp_sk(sk
)->pf
->af
;
4104 retval
= af
->setsockopt(sk
, level
, optname
, optval
, optlen
);
4111 case SCTP_SOCKOPT_BINDX_ADD
:
4112 /* 'optlen' is the size of the addresses buffer. */
4113 retval
= sctp_setsockopt_bindx(sk
, (struct sockaddr __user
*)optval
,
4114 optlen
, SCTP_BINDX_ADD_ADDR
);
4117 case SCTP_SOCKOPT_BINDX_REM
:
4118 /* 'optlen' is the size of the addresses buffer. */
4119 retval
= sctp_setsockopt_bindx(sk
, (struct sockaddr __user
*)optval
,
4120 optlen
, SCTP_BINDX_REM_ADDR
);
4123 case SCTP_SOCKOPT_CONNECTX_OLD
:
4124 /* 'optlen' is the size of the addresses buffer. */
4125 retval
= sctp_setsockopt_connectx_old(sk
,
4126 (struct sockaddr __user
*)optval
,
4130 case SCTP_SOCKOPT_CONNECTX
:
4131 /* 'optlen' is the size of the addresses buffer. */
4132 retval
= sctp_setsockopt_connectx(sk
,
4133 (struct sockaddr __user
*)optval
,
4137 case SCTP_DISABLE_FRAGMENTS
:
4138 retval
= sctp_setsockopt_disable_fragments(sk
, optval
, optlen
);
4142 retval
= sctp_setsockopt_events(sk
, optval
, optlen
);
4145 case SCTP_AUTOCLOSE
:
4146 retval
= sctp_setsockopt_autoclose(sk
, optval
, optlen
);
4149 case SCTP_PEER_ADDR_PARAMS
:
4150 retval
= sctp_setsockopt_peer_addr_params(sk
, optval
, optlen
);
4153 case SCTP_DELAYED_SACK
:
4154 retval
= sctp_setsockopt_delayed_ack(sk
, optval
, optlen
);
4156 case SCTP_PARTIAL_DELIVERY_POINT
:
4157 retval
= sctp_setsockopt_partial_delivery_point(sk
, optval
, optlen
);
4161 retval
= sctp_setsockopt_initmsg(sk
, optval
, optlen
);
4163 case SCTP_DEFAULT_SEND_PARAM
:
4164 retval
= sctp_setsockopt_default_send_param(sk
, optval
,
4167 case SCTP_DEFAULT_SNDINFO
:
4168 retval
= sctp_setsockopt_default_sndinfo(sk
, optval
, optlen
);
4170 case SCTP_PRIMARY_ADDR
:
4171 retval
= sctp_setsockopt_primary_addr(sk
, optval
, optlen
);
4173 case SCTP_SET_PEER_PRIMARY_ADDR
:
4174 retval
= sctp_setsockopt_peer_primary_addr(sk
, optval
, optlen
);
4177 retval
= sctp_setsockopt_nodelay(sk
, optval
, optlen
);
4180 retval
= sctp_setsockopt_rtoinfo(sk
, optval
, optlen
);
4182 case SCTP_ASSOCINFO
:
4183 retval
= sctp_setsockopt_associnfo(sk
, optval
, optlen
);
4185 case SCTP_I_WANT_MAPPED_V4_ADDR
:
4186 retval
= sctp_setsockopt_mappedv4(sk
, optval
, optlen
);
4189 retval
= sctp_setsockopt_maxseg(sk
, optval
, optlen
);
4191 case SCTP_ADAPTATION_LAYER
:
4192 retval
= sctp_setsockopt_adaptation_layer(sk
, optval
, optlen
);
4195 retval
= sctp_setsockopt_context(sk
, optval
, optlen
);
4197 case SCTP_FRAGMENT_INTERLEAVE
:
4198 retval
= sctp_setsockopt_fragment_interleave(sk
, optval
, optlen
);
4200 case SCTP_MAX_BURST
:
4201 retval
= sctp_setsockopt_maxburst(sk
, optval
, optlen
);
4203 case SCTP_AUTH_CHUNK
:
4204 retval
= sctp_setsockopt_auth_chunk(sk
, optval
, optlen
);
4206 case SCTP_HMAC_IDENT
:
4207 retval
= sctp_setsockopt_hmac_ident(sk
, optval
, optlen
);
4210 retval
= sctp_setsockopt_auth_key(sk
, optval
, optlen
);
4212 case SCTP_AUTH_ACTIVE_KEY
:
4213 retval
= sctp_setsockopt_active_key(sk
, optval
, optlen
);
4215 case SCTP_AUTH_DELETE_KEY
:
4216 retval
= sctp_setsockopt_del_key(sk
, optval
, optlen
);
4218 case SCTP_AUTO_ASCONF
:
4219 retval
= sctp_setsockopt_auto_asconf(sk
, optval
, optlen
);
4221 case SCTP_PEER_ADDR_THLDS
:
4222 retval
= sctp_setsockopt_paddr_thresholds(sk
, optval
, optlen
);
4224 case SCTP_RECVRCVINFO
:
4225 retval
= sctp_setsockopt_recvrcvinfo(sk
, optval
, optlen
);
4227 case SCTP_RECVNXTINFO
:
4228 retval
= sctp_setsockopt_recvnxtinfo(sk
, optval
, optlen
);
4230 case SCTP_PR_SUPPORTED
:
4231 retval
= sctp_setsockopt_pr_supported(sk
, optval
, optlen
);
4233 case SCTP_DEFAULT_PRINFO
:
4234 retval
= sctp_setsockopt_default_prinfo(sk
, optval
, optlen
);
4236 case SCTP_RECONFIG_SUPPORTED
:
4237 retval
= sctp_setsockopt_reconfig_supported(sk
, optval
, optlen
);
4239 case SCTP_ENABLE_STREAM_RESET
:
4240 retval
= sctp_setsockopt_enable_strreset(sk
, optval
, optlen
);
4242 case SCTP_RESET_STREAMS
:
4243 retval
= sctp_setsockopt_reset_streams(sk
, optval
, optlen
);
4245 case SCTP_RESET_ASSOC
:
4246 retval
= sctp_setsockopt_reset_assoc(sk
, optval
, optlen
);
4248 case SCTP_ADD_STREAMS
:
4249 retval
= sctp_setsockopt_add_streams(sk
, optval
, optlen
);
4251 case SCTP_STREAM_SCHEDULER
:
4252 retval
= sctp_setsockopt_scheduler(sk
, optval
, optlen
);
4254 case SCTP_STREAM_SCHEDULER_VALUE
:
4255 retval
= sctp_setsockopt_scheduler_value(sk
, optval
, optlen
);
4257 case SCTP_INTERLEAVING_SUPPORTED
:
4258 retval
= sctp_setsockopt_interleaving_supported(sk
, optval
,
4262 retval
= -ENOPROTOOPT
;
4272 /* API 3.1.6 connect() - UDP Style Syntax
4274 * An application may use the connect() call in the UDP model to initiate an
4275 * association without sending data.
4279 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
4281 * sd: the socket descriptor to have a new association added to.
4283 * nam: the address structure (either struct sockaddr_in or struct
4284 * sockaddr_in6 defined in RFC2553 [7]).
4286 * len: the size of the address.
static int sctp_connect(struct sock *sk, struct sockaddr *addr,
			int addr_len)
{
	int err = 0;
	struct sctp_af *af;

	lock_sock(sk);

	pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Validate addr_len before calling common connect/connectx routine. */
	af = sctp_get_af_specific(addr->sa_family);
	if (!af || addr_len < af->sockaddr_len) {
		err = -EINVAL;
	} else {
		/* Pass correct addr len to common routine (so it knows there
		 * is only one address being passed.
		 */
		err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
	}

	release_sock(sk);
	return err;
}
/* FIXME: Write comments. */
static int sctp_disconnect(struct sock *sk, int flags)
{
	return -EOPNOTSUPP; /* STUB */
}
/* 4.1.4 accept() - TCP Style Syntax
 *
 * Applications use accept() call to remove an established SCTP
 * association from the accept queue of the endpoint.  A new socket
 * descriptor will be returned from accept() to represent the newly
 * formed association.
 */
static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sock *newsk = NULL;
	struct sctp_association *asoc;
	long timeo;
	int error = 0;

	lock_sock(sk);

	sp = sctp_sk(sk);
	ep = sp->ep;

	if (!sctp_style(sk, TCP)) {
		error = -EOPNOTSUPP;
		goto out;
	}

	if (!sctp_sstate(sk, LISTENING)) {
		error = -EINVAL;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	error = sctp_wait_for_accept(sk, timeo);
	if (error)
		goto out;

	/* We treat the list of associations on the endpoint as the accept
	 * queue and pick the first association on the list.
	 */
	asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);

	newsk = sp->pf->create_accept_sk(sk, asoc, kern);
	if (!newsk) {
		error = -ENOMEM;
		goto out;
	}

	/* Populate the fields of the newsk from the oldsk and migrate the
	 * asoc to the newsk.
	 */
	sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);

out:
	release_sock(sk);
	*err = error;
	return newsk;
}
/* The SCTP ioctl handler. */
static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	/*
	 * SEQPACKET-style sockets in LISTENING state are valid, for
	 * SCTP, so only discard TCP-style sockets in LISTENING state.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned int amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
		break;
	}
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}
/* This is the function which gets called during socket creation to
 * initialize the SCTP-specific portion of the sock.
 * The sock structure should already be zero-filled memory.
 */
static int sctp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;

	pr_debug("%s: sk:%p\n", __func__, sk);

	sp = sctp_sk(sk);

	/* Initialize the SCTP per socket area. */
	switch (sk->sk_type) {
	case SOCK_SEQPACKET:
		sp->type = SCTP_SOCKET_UDP;
		break;
	case SOCK_STREAM:
		sp->type = SCTP_SOCKET_TCP;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk->sk_gso_type = SKB_GSO_SCTP;

	/* Initialize default send parameters. These parameters can be
	 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
	 */
	sp->default_stream = 0;
	sp->default_ppid = 0;
	sp->default_flags = 0;
	sp->default_context = 0;
	sp->default_timetolive = 0;

	sp->default_rcv_context = 0;
	sp->max_burst = net->sctp.max_burst;

	sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;

	/* Initialize default setup parameters. These parameters
	 * can be modified with the SCTP_INITMSG socket option or
	 * overridden by the SCTP_INIT CMSG.
	 */
	sp->initmsg.sinit_num_ostreams   = sctp_max_outstreams;
	sp->initmsg.sinit_max_instreams  = sctp_max_instreams;
	sp->initmsg.sinit_max_attempts   = net->sctp.max_retrans_init;
	sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;

	/* Initialize default RTO related parameters. These parameters can
	 * be modified for with the SCTP_RTOINFO socket option.
	 */
	sp->rtoinfo.srto_initial = net->sctp.rto_initial;
	sp->rtoinfo.srto_max     = net->sctp.rto_max;
	sp->rtoinfo.srto_min     = net->sctp.rto_min;

	/* Initialize default association related parameters. These parameters
	 * can be modified with the SCTP_ASSOCINFO socket option.
	 */
	sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
	sp->assocparams.sasoc_number_peer_destinations = 0;
	sp->assocparams.sasoc_peer_rwnd = 0;
	sp->assocparams.sasoc_local_rwnd = 0;
	sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;

	/* Initialize default event subscriptions. By default, all the
	 * options are off.
	 */
	memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));

	/* Default Peer Address Parameters.  These defaults can
	 * be modified via SCTP_PEER_ADDR_PARAMS
	 */
	sp->hbinterval  = net->sctp.hb_interval;
	sp->pathmaxrxt  = net->sctp.max_retrans_path;
	sp->pathmtu     = 0; /* allow default discovery */
	sp->sackdelay   = net->sctp.sack_timeout;
	sp->sackfreq    = 2;
	sp->param_flags = SPP_HB_ENABLE |
			  SPP_PMTUD_ENABLE |
			  SPP_SACKDELAY_ENABLE;

	/* If enabled no SCTP message fragmentation will be performed.
	 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
	 */
	sp->disable_fragments = 0;

	/* Enable Nagle algorithm by default. */
	sp->nodelay = 0;

	sp->recvrcvinfo = 0;
	sp->recvnxtinfo = 0;

	/* Enable by default. */
	sp->v4mapped = 1;

	/* Auto-close idle associations after the configured
	 * number of seconds.  A value of 0 disables this
	 * feature.  Configure through the SCTP_AUTOCLOSE socket option,
	 * for UDP-style sockets only.
	 */
	sp->autoclose = 0;

	/* User specified fragmentation limit. */
	sp->user_frag = 0;

	sp->adaptation_ind = 0;

	sp->pf = sctp_get_pf_specific(sk->sk_family);

	/* Control variables for partial data delivery. */
	atomic_set(&sp->pd_mode, 0);
	skb_queue_head_init(&sp->pd_lobby);
	sp->frag_interleave = 0;

	/* Create a per socket endpoint structure.  Even if we
	 * change the data structure relationships, this may still
	 * be useful for storing pre-connect address information.
	 */
	sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
	if (!sp->ep)
		return -ENOMEM;

	sp->hmac = NULL;

	sk->sk_destruct = sctp_destruct_sock;

	SCTP_DBG_OBJCNT_INC(sock);

	sk_sockets_allocated_inc(sk);
	sock_prot_inuse_add(net, sk->sk_prot, 1);

	/* Nothing can fail after this block, otherwise
	 * sctp_destroy_sock() will be called without addr_wq_lock held
	 */
	if (net->sctp.default_auto_asconf) {
		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
		list_add_tail(&sp->auto_asconf_list,
		    &net->sctp.auto_asconf_splist);
		sp->do_auto_asconf = 1;
		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
	} else {
		sp->do_auto_asconf = 0;
	}

	return 0;
}
/* Cleanup any SCTP per socket resources. Must be called with
 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
 */
static void sctp_destroy_sock(struct sock *sk)
{
	struct sctp_sock *sp;

	pr_debug("%s: sk:%p\n", __func__, sk);

	/* Release our hold on the endpoint. */
	sp = sctp_sk(sk);
	/* This could happen during socket init, thus we bail out
	 * early, since the rest of the below is not setup either.
	 */
	if (sp->ep == NULL)
		return;

	if (sp->do_auto_asconf) {
		sp->do_auto_asconf = 0;
		list_del(&sp->auto_asconf_list);
	}
	sctp_endpoint_free(sp->ep);
	sk_sockets_allocated_dec(sk);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
/* Triggered when there are no references on the socket anymore */
static void sctp_destruct_sock(struct sock *sk)
{
	struct sctp_sock *sp = sctp_sk(sk);

	/* Free up the HMAC transform. */
	crypto_free_shash(sp->hmac);

	inet_sock_destruct(sk);
}
/* API 4.1.7 shutdown() - TCP Style Syntax
 *	int shutdown(int socket, int how);
 *
 *	sd      - the socket descriptor of the association to be closed.
 *	how     - Specifies the type of shutdown.  The values are as follows:
 *		SHUT_RD
 *			Disables further receive operations. No SCTP
 *			protocol action is taken.
 *		SHUT_WR
 *			Disables further send operations, and initiates
 *			the SCTP shutdown sequence.
 *		SHUT_RDWR
 *			Disables further send and receive operations
 *			and initiates the SCTP shutdown sequence.
 */
static void sctp_shutdown(struct sock *sk, int how)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;

	if (!sctp_style(sk, TCP))
		return;

	ep = sctp_sk(sk)->ep;
	if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
		struct sctp_association *asoc;

		inet_sk_set_state(sk, SCTP_SS_CLOSING);
		asoc = list_entry(ep->asocs.next,
				  struct sctp_association, asocs);
		sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}
}
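
/* Illustrative user-space sketch (not part of this file): on a TCP-style
 * (SOCK_STREAM) SCTP socket, shutdown(sd, SHUT_WR) triggers the SCTP
 * SHUTDOWN sequence handled above, while SHUT_RD takes no protocol
 * action.  "sd" is assumed to be a connected one-to-one SCTP socket and
 * "buf"/"n" are placeholders.
 *
 *	if (shutdown(sd, SHUT_WR) < 0)
 *		perror("shutdown");
 *	while ((n = recv(sd, buf, sizeof(buf), 0)) > 0)
 *		;	keep draining until the peer finishes its side
 */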
int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
		       struct sctp_info *info)
{
	struct sctp_transport *prim;
	struct list_head *pos;
	int mask;

	memset(info, 0, sizeof(*info));
	if (!asoc) {
		struct sctp_sock *sp = sctp_sk(sk);

		info->sctpi_s_autoclose = sp->autoclose;
		info->sctpi_s_adaptation_ind = sp->adaptation_ind;
		info->sctpi_s_pd_point = sp->pd_point;
		info->sctpi_s_nodelay = sp->nodelay;
		info->sctpi_s_disable_fragments = sp->disable_fragments;
		info->sctpi_s_v4mapped = sp->v4mapped;
		info->sctpi_s_frag_interleave = sp->frag_interleave;
		info->sctpi_s_type = sp->type;

		return 0;
	}

	info->sctpi_tag = asoc->c.my_vtag;
	info->sctpi_state = asoc->state;
	info->sctpi_rwnd = asoc->a_rwnd;
	info->sctpi_unackdata = asoc->unack_data;
	info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
	info->sctpi_instrms = asoc->stream.incnt;
	info->sctpi_outstrms = asoc->stream.outcnt;
	list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
		info->sctpi_inqueue++;
	list_for_each(pos, &asoc->outqueue.out_chunk_list)
		info->sctpi_outqueue++;
	info->sctpi_overall_error = asoc->overall_error_count;
	info->sctpi_max_burst = asoc->max_burst;
	info->sctpi_maxseg = asoc->frag_point;
	info->sctpi_peer_rwnd = asoc->peer.rwnd;
	info->sctpi_peer_tag = asoc->c.peer_vtag;

	mask = asoc->peer.ecn_capable << 1;
	mask = (mask | asoc->peer.ipv4_address) << 1;
	mask = (mask | asoc->peer.ipv6_address) << 1;
	mask = (mask | asoc->peer.hostname_address) << 1;
	mask = (mask | asoc->peer.asconf_capable) << 1;
	mask = (mask | asoc->peer.prsctp_capable) << 1;
	mask = (mask | asoc->peer.auth_capable);
	info->sctpi_peer_capable = mask;
	mask = asoc->peer.sack_needed << 1;
	mask = (mask | asoc->peer.sack_generation) << 1;
	mask = (mask | asoc->peer.zero_window_announced);
	info->sctpi_peer_sack = mask;

	info->sctpi_isacks = asoc->stats.isacks;
	info->sctpi_osacks = asoc->stats.osacks;
	info->sctpi_opackets = asoc->stats.opackets;
	info->sctpi_ipackets = asoc->stats.ipackets;
	info->sctpi_rtxchunks = asoc->stats.rtxchunks;
	info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
	info->sctpi_idupchunks = asoc->stats.idupchunks;
	info->sctpi_gapcnt = asoc->stats.gapcnt;
	info->sctpi_ouodchunks = asoc->stats.ouodchunks;
	info->sctpi_iuodchunks = asoc->stats.iuodchunks;
	info->sctpi_oodchunks = asoc->stats.oodchunks;
	info->sctpi_iodchunks = asoc->stats.iodchunks;
	info->sctpi_octrlchunks = asoc->stats.octrlchunks;
	info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;

	prim = asoc->peer.primary_path;
	memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
	info->sctpi_p_state = prim->state;
	info->sctpi_p_cwnd = prim->cwnd;
	info->sctpi_p_srtt = prim->srtt;
	info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
	info->sctpi_p_hbinterval = prim->hbinterval;
	info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
	info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
	info->sctpi_p_ssthresh = prim->ssthresh;
	info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
	info->sctpi_p_flight_size = prim->flight_size;
	info->sctpi_p_error = prim->error_count;

	return 0;
}
EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
/* use callback to avoid exporting the core structure */
void sctp_transport_walk_start(struct rhashtable_iter *iter)
{
	rhltable_walk_enter(&sctp_transport_hashtable, iter);

	rhashtable_walk_start(iter);
}

void sctp_transport_walk_stop(struct rhashtable_iter *iter)
{
	rhashtable_walk_stop(iter);
	rhashtable_walk_exit(iter);
}
struct sctp_transport *sctp_transport_get_next(struct net *net,
					       struct rhashtable_iter *iter)
{
	struct sctp_transport *t;

	t = rhashtable_walk_next(iter);
	for (; t; t = rhashtable_walk_next(iter)) {
		if (IS_ERR(t)) {
			if (PTR_ERR(t) == -EAGAIN)
				continue;
			break;
		}

		if (net_eq(sock_net(t->asoc->base.sk), net) &&
		    t->asoc->peer.primary_path == t)
			break;
	}

	return t;
}

struct sctp_transport *sctp_transport_get_idx(struct net *net,
					      struct rhashtable_iter *iter,
					      int pos)
{
	void *obj = SEQ_START_TOKEN;

	while (pos && (obj = sctp_transport_get_next(net, iter)) &&
	       !IS_ERR(obj))
		pos--;

	return obj;
}
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
			   void *p) {
	int err = 0;
	int hash = 0;
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
	     hash++, head++) {
		read_lock_bh(&head->lock);
		sctp_for_each_hentry(epb, &head->chain) {
			err = cb(sctp_ep(epb), p);
			if (err)
				break;
		}
		read_unlock_bh(&head->lock);
	}

	return err;
}
EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
				  struct net *net,
				  const union sctp_addr *laddr,
				  const union sctp_addr *paddr, void *p)
{
	struct sctp_transport *transport;
	int err;

	rcu_read_lock();
	transport = sctp_addrs_lookup_transport(net, laddr, paddr);
	rcu_read_unlock();
	if (!transport)
		return -ENOENT;

	err = cb(transport, p);
	sctp_transport_put(transport);

	return err;
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
			    int (*cb_done)(struct sctp_transport *, void *),
			    struct net *net, int *pos, void *p) {
	struct rhashtable_iter hti;
	struct sctp_transport *tsp;
	int ret;

again:
	ret = 0;
	sctp_transport_walk_start(&hti);

	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
		if (!sctp_transport_hold(tsp))
			continue;
		ret = cb(tsp, p);
		if (ret)
			break;
		(*pos)++;
		sctp_transport_put(tsp);
	}
	sctp_transport_walk_stop(&hti);

	if (ret) {
		if (cb_done && !cb_done(tsp, p)) {
			(*pos)++;
			sctp_transport_put(tsp);
			goto again;
		}
		sctp_transport_put(tsp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sctp_for_each_transport);
/* 7.2.1 Association Status (SCTP_STATUS)

 * Applications can retrieve current status information about an
 * association, including association state, peer receiver window size,
 * number of unacked data chunks, and number of data chunks pending
 * receipt.  This information is read-only.
 */
static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_status status;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	sctp_assoc_t associd;
	int retval = 0;

	if (len < sizeof(status)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(status);
	if (copy_from_user(&status, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	associd = status.sstat_assoc_id;
	asoc = sctp_id2assoc(sk, associd);
	if (!asoc) {
		retval = -EINVAL;
		goto out;
	}

	transport = asoc->peer.primary_path;

	status.sstat_assoc_id = sctp_assoc2id(asoc);
	status.sstat_state = sctp_assoc_to_state(asoc);
	status.sstat_rwnd =  asoc->peer.rwnd;
	status.sstat_unackdata = asoc->unack_data;

	status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
	status.sstat_instrms = asoc->stream.incnt;
	status.sstat_outstrms = asoc->stream.outcnt;
	status.sstat_fragmentation_point = asoc->frag_point;
	status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
	memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
			transport->af_specific->sockaddr_len);
	/* Map ipv4 address into v4-mapped-on-v6 address.  */
	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
		(union sctp_addr *)&status.sstat_primary.spinfo_address);
	status.sstat_primary.spinfo_state = transport->state;
	status.sstat_primary.spinfo_cwnd = transport->cwnd;
	status.sstat_primary.spinfo_srtt = transport->srtt;
	status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
	status.sstat_primary.spinfo_mtu = transport->pathmtu;

	if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
		status.sstat_primary.spinfo_state = SCTP_ACTIVE;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
		 __func__, len, status.sstat_state, status.sstat_rwnd,
		 status.sstat_assoc_id);

	if (copy_to_user(optval, &status, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}
/* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
 *
 * Applications can retrieve information about a specific peer address
 * of an association, including its reachability state, congestion
 * window, and retransmission timer values.  This information is
 * read-only.
 */
static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
					  char __user *optval,
					  int __user *optlen)
{
	struct sctp_paddrinfo pinfo;
	struct sctp_transport *transport;
	int retval = 0;

	if (len < sizeof(pinfo)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(pinfo);
	if (copy_from_user(&pinfo, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
					   pinfo.spinfo_assoc_id);
	if (!transport)
		return -EINVAL;

	pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
	pinfo.spinfo_state = transport->state;
	pinfo.spinfo_cwnd = transport->cwnd;
	pinfo.spinfo_srtt = transport->srtt;
	pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
	pinfo.spinfo_mtu = transport->pathmtu;

	if (pinfo.spinfo_state == SCTP_UNKNOWN)
		pinfo.spinfo_state = SCTP_ACTIVE;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, &pinfo, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag.  If enabled no SCTP message
 * fragmentation will be performed.  Instead if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * instead an error will be indicated to the user.
 */
static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->disable_fragments == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
 *
 * This socket option is used to specify various notifications and
 * ancillary data the user wishes to receive.
 */
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
				  int __user *optlen)
{
	if (len == 0)
		return -EINVAL;
	if (len > sizeof(struct sctp_event_subscribe))
		len = sizeof(struct sctp_event_subscribe);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
		return -EFAULT;
	return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (len < sizeof(int))
		return -EINVAL;
	len = sizeof(int);
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(sctp_sk(sk)->autoclose, (int __user *)optval))
		return -EFAULT;
	return 0;
}
/* Helper routine to branch off an association to a new socket.  */
int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
	struct sctp_association *asoc = sctp_id2assoc(sk, id);
	struct sctp_sock *sp = sctp_sk(sk);
	struct socket *sock;
	int err = 0;

	/* Do not peel off from one netns to another one. */
	if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
		return -EINVAL;

	if (!asoc)
		return -EINVAL;

	/* An association cannot be branched off from an already peeled-off
	 * socket, nor is this supported for tcp style sockets.
	 */
	if (!sctp_style(sk, UDP))
		return -EINVAL;

	/* Create a new socket.  */
	err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
	if (err < 0)
		return err;

	sctp_copy_sock(sock->sk, sk, asoc);

	/* Make peeled-off sockets more like 1-1 accepted sockets.
	 * Set the daddr and initialize id to something more random
	 */
	sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);

	/* Populate the fields of the newsk from the oldsk and migrate the
	 * asoc to the newsk.
	 */
	sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);

	*sockp = sock;

	return err;
}
EXPORT_SYMBOL(sctp_do_peeloff);
static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
					  struct file **newfile, unsigned flags)
{
	struct socket *newsock;
	int retval;

	retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
	if (retval < 0)
		goto out;

	/* Map the socket to an unused fd that can be returned to the user.  */
	retval = get_unused_fd_flags(flags & SOCK_CLOEXEC);
	if (retval < 0) {
		sock_release(newsock);
		goto out;
	}

	*newfile = sock_alloc_file(newsock, 0, NULL);
	if (IS_ERR(*newfile)) {
		put_unused_fd(retval);
		retval = PTR_ERR(*newfile);
		*newfile = NULL;
		return retval;
	}

	pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
		 retval);

	peeloff->sd = retval;

	if (flags & SOCK_NONBLOCK)
		(*newfile)->f_flags |= O_NONBLOCK;
out:
	return retval;
}
static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	sctp_peeloff_arg_t peeloff;
	struct file *newfile = NULL;
	int retval = 0;

	if (len < sizeof(sctp_peeloff_arg_t))
		return -EINVAL;
	len = sizeof(sctp_peeloff_arg_t);
	if (copy_from_user(&peeloff, optval, len))
		return -EFAULT;

	retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
	if (retval < 0)
		goto out;

	/* Return the fd mapped to the new socket.  */
	if (put_user(len, optlen)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}

	if (copy_to_user(optval, &peeloff, len)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}
	fd_install(retval, newfile);
out:
	return retval;
}
static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
					 char __user *optval, int __user *optlen)
{
	sctp_peeloff_flags_arg_t peeloff;
	struct file *newfile = NULL;
	int retval = 0;

	if (len < sizeof(sctp_peeloff_flags_arg_t))
		return -EINVAL;
	len = sizeof(sctp_peeloff_flags_arg_t);
	if (copy_from_user(&peeloff, optval, len))
		return -EFAULT;

	retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
						&newfile, peeloff.flags);
	if (retval < 0)
		goto out;

	/* Return the fd mapped to the new socket.  */
	if (put_user(len, optlen)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}

	if (copy_to_user(optval, &peeloff, len)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}
	fd_install(retval, newfile);
out:
	return retval;
}
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 * };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a  value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable. If a  value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for. This value will apply to all addresses of an
 *                     association if the spp_address field is empty. Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association. The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE  - Enable heartbeats on the
 *                     specified address. Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address. Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled. Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified. Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address. Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address. Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected. Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive. Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack. The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address. Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack. If the spp_address field is blank then
 *                     delayed sack is disabled for the entire association. Note
 *                     also that this field is mutually exclusive to
 *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                     results.
 */
static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
					    char __user *optval, int __user *optlen)
{
	struct sctp_paddrparams  params;
	struct sctp_transport   *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_paddrparams))
		return -EINVAL;

	len = sizeof(struct sctp_paddrparams);
	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	/* If an address other than INADDR_ANY is specified, and
	 * no transport is found, then the request is invalid.
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans) {
			pr_debug("%s: failed no transport\n", __func__);
			return -EINVAL;
		}
	}

	/* Get association, if assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
		pr_debug("%s: failed no association\n", __func__);
		return -EINVAL;
	}

	if (trans) {
		/* Fetch transport values. */
		params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
		params.spp_pathmtu    = trans->pathmtu;
		params.spp_pathmaxrxt = trans->pathmaxrxt;
		params.spp_sackdelay  = jiffies_to_msecs(trans->sackdelay);

		/*draft-11 doesn't say what to return in spp_flags*/
		params.spp_flags      = trans->param_flags;
	} else if (asoc) {
		/* Fetch association values. */
		params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
		params.spp_pathmtu    = asoc->pathmtu;
		params.spp_pathmaxrxt = asoc->pathmaxrxt;
		params.spp_sackdelay  = jiffies_to_msecs(asoc->sackdelay);

		/*draft-11 doesn't say what to return in spp_flags*/
		params.spp_flags      = asoc->param_flags;
	} else {
		/* Fetch socket values. */
		params.spp_hbinterval = sp->hbinterval;
		params.spp_pathmtu    = sp->pathmtu;
		params.spp_sackdelay  = sp->sackdelay;
		params.spp_pathmaxrxt = sp->pathmaxrxt;

		/*draft-11 doesn't say what to return in spp_flags*/
		params.spp_flags      = sp->param_flags;
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will affect the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.  If
 * the assoc_id is 0, then this sets or gets the endpoints default
 * values.  If the assoc_id field is non-zero, then the set or get
 * effects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t            sack_assoc_id;
 *     uint32_t                sack_delay;
 *     uint32_t                sack_freq;
 * };
 *
 * sack_assoc_id -  This parameter, indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoints default value is changed (affecting future
 *    associations only).
 *
 * sack_delay -  This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq -  This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2, setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_sack_info    params;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	if (len >= sizeof(struct sctp_sack_info)) {
		len = sizeof(struct sctp_sack_info);

		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else if (len == sizeof(struct sctp_assoc_value)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
				    "Use struct sctp_sack_info instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	/* Get association, if sack_assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
	if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		/* Fetch association values. */
		if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay = jiffies_to_msecs(
				asoc->sackdelay);
			params.sack_freq = asoc->sackfreq;

		} else {
			params.sack_delay = 0;
			params.sack_freq = 1;
		}
	} else {
		/* Fetch socket values. */
		if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay  = sp->sackdelay;
			params.sack_freq = sp->sackfreq;
		} else {
			params.sack_delay  = 0;
			params.sack_freq = 1;
		}
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are affected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	if (len < sizeof(struct sctp_initmsg))
		return -EINVAL;
	len = sizeof(struct sctp_initmsg);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
		return -EFAULT;
	return 0;
}
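
/* Illustrative user-space sketch (not part of this file): requesting more
 * streams for future associations with SCTP_INITMSG before connecting.
 * The values below are arbitrary examples; "sd" is a placeholder.
 *
 *	struct sctp_initmsg im;
 *
 *	memset(&im, 0, sizeof(im));
 *	im.sinit_num_ostreams  = 10;
 *	im.sinit_max_instreams = 10;
 *	im.sinit_max_attempts  = 4;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));
 */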
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_transport *from;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	size_t space_left;
	int bytes_copied;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/* For UDP-style sockets, id specifies the association to query.  */
	asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
	if (!asoc)
		return -EINVAL;

	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		memcpy(&temp, &from->ipaddr, sizeof(temp));
		addrlen = sctp_get_pf_specific(sk->sk_family)
			      ->addr_to_user(sp, &temp);
		if (space_left < addrlen)
			return -ENOMEM;
		if (copy_to_user(to, &temp, addrlen))
			return -EFAULT;
		to += addrlen;
		cnt++;
		space_left -= addrlen;
	}

	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
		return -EFAULT;
	bytes_copied = ((char __user *)to) - optval;
	if (put_user(bytes_copied, optlen))
		return -EFAULT;

	return 0;
}
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
			    size_t space_left, int *bytes_copied)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr temp;
	int cnt = 0;
	int addrlen;
	struct net *net = sock_net(sk);

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
		if (!addr->valid)
			continue;

		if ((PF_INET == sk->sk_family) &&
		    (AF_INET6 == addr->a.sa.sa_family))
			continue;
		if ((PF_INET6 == sk->sk_family) &&
		    inet_v6_ipv6only(sk) &&
		    (AF_INET == addr->a.sa.sa_family))
			continue;
		memcpy(&temp, &addr->a, sizeof(temp));
		if (!temp.v4.sin_port)
			temp.v4.sin_port = htons(port);

		addrlen = sctp_get_pf_specific(sk->sk_family)
			      ->addr_to_user(sctp_sk(sk), &temp);

		if (space_left < addrlen) {
			cnt = -ENOMEM;
			break;
		}
		memcpy(to, &temp, addrlen);

		to += addrlen;
		cnt++;
		space_left -= addrlen;
		*bytes_copied += addrlen;
	}
	rcu_read_unlock();

	return cnt;
}
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	struct sctp_bind_addr *bp;
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_sockaddr_entry *addr;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	int err = 0;
	size_t space_left;
	int bytes_copied = 0;
	void *addrs;
	void *buf;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/*
	 * For UDP-style sockets, id specifies the association to query.
	 * If the id field is set to the value '0' then the locally bound
	 * addresses are returned without regard to any particular
	 * association.
	 */
	if (0 == getaddrs.assoc_id) {
		bp = &sctp_sk(sk)->ep->base.bind_addr;
	} else {
		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
		if (!asoc)
			return -EINVAL;
		bp = &asoc->base.bind_addr;
	}

	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
	if (!addrs)
		return -ENOMEM;

	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
	 * addresses from the global local address list.
	 */
	if (sctp_list_single_entry(&bp->address_list)) {
		addr = list_entry(bp->address_list.next,
				  struct sctp_sockaddr_entry, list);
		if (sctp_is_any(sk, &addr->a)) {
			cnt = sctp_copy_laddrs(sk, bp->port, addrs,
						space_left, &bytes_copied);
			if (cnt < 0) {
				err = cnt;
				goto out;
			}
			goto copy_getaddrs;
		}
	}

	buf = addrs;
	/* Protection on the bound address list is not needed since
	 * in the socket option context we hold a socket lock and
	 * thus the bound address list can't change.
	 */
	list_for_each_entry(addr, &bp->address_list, list) {
		memcpy(&temp, &addr->a, sizeof(temp));
		addrlen = sctp_get_pf_specific(sk->sk_family)
			      ->addr_to_user(sp, &temp);
		if (space_left < addrlen) {
			err = -ENOMEM; /*fixme: right error?*/
			goto out;
		}
		memcpy(buf, &temp, addrlen);
		buf += addrlen;
		bytes_copied += addrlen;
		cnt++;
		space_left -= addrlen;
	}

copy_getaddrs:
	if (copy_to_user(to, addrs, bytes_copied)) {
		err = -EFAULT;
		goto out;
	}
	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
		err = -EFAULT;
		goto out;
	}
	/* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
	 * but we can't change it anymore.
	 */
	if (put_user(bytes_copied, optlen))
		err = -EFAULT;
out:
	kfree(addrs);
	return err;
}
/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
 *
 * Requests that the local SCTP stack use the enclosed peer address as
 * the association primary.  The enclosed address must be one of the
 * association peer's addresses.
 */
static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	struct sctp_prim prim;
	struct sctp_association *asoc;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_prim))
		return -EINVAL;

	len = sizeof(struct sctp_prim);

	if (copy_from_user(&prim, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.primary_path)
		return -ENOTCONN;

	memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
		asoc->peer.primary_path->af_specific->sockaddr_len);

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
			(union sctp_addr *)&prim.ssp_addr);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &prim, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.11  Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
 *
 * Requests that the local endpoint set the specified Adaptation Layer
 * Indication parameter for all future INIT and INIT-ACK exchanges.
 */
static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	struct sctp_setadaptation adaptation;

	if (len < sizeof(struct sctp_setadaptation))
		return -EINVAL;

	len = sizeof(struct sctp_setadaptation);

	adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &adaptation, len))
		return -EFAULT;

	return 0;
}
/*
 *
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 *   Applications that wish to use the sendto() system call may wish to
 *   specify a default set of parameters that would normally be supplied
 *   through the inclusion of ancillary data.  This socket option allows
 *   such an application to set the default sctp_sndrcvinfo structure.
 *
 *   The application that wishes to use this socket option simply passes
 *   in to this call the sctp_sndrcvinfo structure defined in Section
 *   5.2.2) The input parameters accepted by this call include
 *   sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 *   sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
 *   to this call if the caller is using the UDP model.
 *
 *   For getsockopt, it gets the default sctp_sndrcvinfo structure.
 */
static int sctp_getsockopt_default_send_param(struct sock *sk,
					int len, char __user *optval,
					int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndrcvinfo info;

	if (len < sizeof(info))
		return -EINVAL;

	len = sizeof(info);

	if (copy_from_user(&info, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;
	if (asoc) {
		info.sinfo_stream = asoc->default_stream;
		info.sinfo_flags = asoc->default_flags;
		info.sinfo_ppid = asoc->default_ppid;
		info.sinfo_context = asoc->default_context;
		info.sinfo_timetolive = asoc->default_timetolive;
	} else {
		info.sinfo_stream = sp->default_stream;
		info.sinfo_flags = sp->default_flags;
		info.sinfo_ppid = sp->default_ppid;
		info.sinfo_context = sp->default_context;
		info.sinfo_timetolive = sp->default_timetolive;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &info, len))
		return -EFAULT;

	return 0;
}
/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
 * (SCTP_DEFAULT_SNDINFO)
 */
static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndinfo info;

	if (len < sizeof(info))
		return -EINVAL;

	len = sizeof(info);

	if (copy_from_user(&info, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, info.snd_assoc_id);
	if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;
	if (asoc) {
		info.snd_sid = asoc->default_stream;
		info.snd_flags = asoc->default_flags;
		info.snd_ppid = asoc->default_ppid;
		info.snd_context = asoc->default_context;
	} else {
		info.snd_sid = sp->default_stream;
		info.snd_flags = sp->default_flags;
		info.snd_ppid = sp->default_ppid;
		info.snd_context = sp->default_context;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &info, len))
		return -EFAULT;

	return 0;
}
/*
 *
 * 7.1.5 SCTP_NODELAY
 *
 * Turn on/off any Nagle-like algorithm.  This means that packets are
 * generally sent as soon as possible and no unnecessary delays are
 * introduced, at the cost of more packets in the network.  Expects an
 * integer boolean flag.
 */
static int sctp_getsockopt_nodelay(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->nodelay == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
/*
 *
 * 7.1.1 SCTP_RTOINFO
 *
 * The protocol parameters used to initialize and bound retransmission
 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
 * and modify these parameters.
 * All parameters are time values, in milliseconds.  A value of 0, when
 * modifying the parameters, indicates that the current value should not
 * be changed.
 *
 */
static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
				char __user *optval,
				int __user *optlen) {
	struct sctp_rtoinfo rtoinfo;
	struct sctp_association *asoc;

	if (len < sizeof (struct sctp_rtoinfo))
		return -EINVAL;

	len = sizeof(struct sctp_rtoinfo);

	if (copy_from_user(&rtoinfo, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);

	if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Values corresponding to the specific association. */
	if (asoc) {
		rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
		rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
		rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
	} else {
		/* Values corresponding to the endpoint. */
		struct sctp_sock *sp = sctp_sk(sk);

		rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
		rtoinfo.srto_max = sp->rtoinfo.srto_max;
		rtoinfo.srto_min = sp->rtoinfo.srto_min;
	}

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &rtoinfo, len))
		return -EFAULT;

	return 0;
}
/*
 *
 * 7.1.2 SCTP_ASSOCINFO
 *
 * This option is used to tune the maximum retransmission attempts
 * of the association.
 * Returns an error if the new association retransmission value is
 * greater than the sum of the retransmission value of the peer.
 * See [SCTP] for more information.
 *
 */
static int sctp_getsockopt_associnfo(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{

	struct sctp_assocparams assocparams;
	struct sctp_association *asoc;
	struct list_head *pos;
	int cnt = 0;

	if (len < sizeof (struct sctp_assocparams))
		return -EINVAL;

	len = sizeof(struct sctp_assocparams);

	if (copy_from_user(&assocparams, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);

	if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Values corresponding to the specific association */
	if (asoc) {
		assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
		assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
		assocparams.sasoc_local_rwnd = asoc->a_rwnd;
		assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);

		list_for_each(pos, &asoc->peer.transport_addr_list) {
			cnt++;
		}

		assocparams.sasoc_number_peer_destinations = cnt;
	} else {
		/* Values corresponding to the endpoint */
		struct sctp_sock *sp = sctp_sk(sk);

		assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
		assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
		assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
		assocparams.sasoc_cookie_life =
					sp->assocparams.sasoc_cookie_life;
		assocparams.sasoc_number_peer_destinations =
					sp->assocparams.
					sasoc_number_peer_destinations;
	}

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &assocparams, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
 *
 * This socket option is a boolean flag which turns on or off mapped V4
 * addresses.  If this option is turned on and the socket is type
 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
 * If this option is turned off, then no mapping will be done of V4
 * addresses and a user will receive both PF_INET6 and PF_INET type
 * addresses on the socket.
 */
static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = sp->v4mapped;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.29.  Set or Get the default context (SCTP_CONTEXT)
 * (chapter and verse is quoted at sctp_setsockopt_context())
 */
static int sctp_getsockopt_context(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_assoc_value))
		return -EINVAL;

	len = sizeof(struct sctp_assoc_value);

	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		params.assoc_value = asoc->default_rcv_context;
	} else {
		params.assoc_value = sp->default_rcv_context;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	return 0;
}
/*
 * 8.1.16.  Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
 * This option will get or set the maximum size to put in any outgoing
 * SCTP DATA chunk.  If a message is larger than this size it will be
 * fragmented by SCTP into the specified size.  Note that the underlying
 * SCTP implementation may fragment into smaller sized chunks when the
 * PMTU of the underlying association is smaller than the value set by
 * the user.  The default value for this option is '0' which indicates
 * the user is NOT limiting fragmentation and only the PMTU will affect
 * SCTP's choice of DATA chunk size.  Note also that values set larger
 * than the maximum size of an IP datagram will effectively let SCTP
 * control fragmentation (i.e. the same as setting this option to 0).
 *
 * The following structure is used to access and modify this parameter:
 *
 * struct sctp_assoc_value {
 *   sctp_assoc_t assoc_id;
 *   uint32_t assoc_value;
 * };
 *
 * assoc_id:  This parameter is ignored for one-to-one style sockets.
 *    For one-to-many style sockets this parameter indicates which
 *    association the user is performing an action upon.  Note that if
 *    this field's value is zero then the endpoints default value is
 *    changed (affecting future associations only).
 * assoc_value:  This parameter specifies the maximum size in bytes.
 */
static int sctp_getsockopt_maxseg(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;

	if (len == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in maxseg socket option.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		params.assoc_id = 0;
	} else if (len >= sizeof(struct sctp_assoc_value)) {
		len = sizeof(struct sctp_assoc_value);
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		params.assoc_value = asoc->frag_point;
	else
		params.assoc_value = sctp_sk(sk)->user_frag;

	if (put_user(len, optlen))
		return -EFAULT;
	if (len == sizeof(int)) {
		if (copy_to_user(optval, &params.assoc_value, len))
			return -EFAULT;
	} else {
		if (copy_to_user(optval, &params, len))
			return -EFAULT;
	}

	return 0;
}
/*
 * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
 */
static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
					       char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);

	val = sctp_sk(sk)->frag_interleave;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.25.  Set or Get the sctp partial delivery point
 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
 */
static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
						  char __user *optval,
						  int __user *optlen)
{
	u32 val;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	val = sctp_sk(sk)->pd_point;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
 */
static int sctp_getsockopt_maxburst(struct sock *sk, int len,
				    char __user *optval,
				    int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (len == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in max_burst socket option.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		params.assoc_id = 0;
	} else if (len >= sizeof(struct sctp_assoc_value)) {
		len = sizeof(struct sctp_assoc_value);
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		params.assoc_value = asoc->max_burst;
	} else
		params.assoc_value = sp->max_burst;

	if (len == sizeof(int)) {
		if (copy_to_user(optval, &params.assoc_value, len))
			return -EFAULT;
	} else {
		if (copy_to_user(optval, &params, len))
			return -EFAULT;
	}

	return 0;
}
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_hmacalgo  __user *p = (void __user *)optval;
	struct sctp_hmac_algo_param *hmacs;
	__u16 data_len = 0;
	u32 num_idents;
	int i;

	if (!ep->auth_enable)
		return -EACCES;

	hmacs = ep->auth_hmacs_list;
	data_len = ntohs(hmacs->param_hdr.length) -
		   sizeof(struct sctp_paramhdr);

	if (len < sizeof(struct sctp_hmacalgo) + data_len)
		return -EINVAL;

	len = sizeof(struct sctp_hmacalgo) + data_len;
	num_idents = data_len / sizeof(u16);

	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_idents, &p->shmac_num_idents))
		return -EFAULT;
	for (i = 0; i < num_idents; i++) {
		__u16 hmacid = ntohs(hmacs->hmac_ids[i]);

		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
			return -EFAULT;
	}
	return 0;
}
static int sctp_getsockopt_active_key(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authkeyid))
		return -EINVAL;

	len = sizeof(struct sctp_authkeyid);
	if (copy_from_user(&val, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		val.scact_keynumber = asoc->active_key_id;
	else
		val.scact_keynumber = ep->active_key_id;

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32    num_chunks = 0;
	char __user *to;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(val)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc)
		return -EINVAL;

	ch = asoc->peer.peer_chunks;
	if (!ch)
		goto num;

	/* See if the user provided enough room for all the data */
	num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
	if (len < num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;
	return 0;
}
static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32    num_chunks = 0;
	char __user *to;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(val)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
	else
		ch = ep->auth_chunk_list;

	if (!ch)
		goto num;

	num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
	if (len < sizeof(struct sctp_authchunks) + num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;

	return 0;
}
/*
 * 8.2.5.  Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
 * This option gets the current number of associations that are attached
 * to a one-to-many style socket.  The option value is an uint32_t.
 */
static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	u32 val = 0;

	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		val++;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 8.1.23 SCTP_AUTO_ASCONF
 * See the corresponding setsockopt entry as description
 */
static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
/*
 * 8.2.6. Get the Current Identifiers of Associations
 *        (SCTP_GET_ASSOC_ID_LIST)
 *
 * This option gets the current list of SCTP association identifiers of
 * the SCTP associations handled by a one-to-many style socket.
 */
static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_assoc_ids *ids;
	u32 num = 0;

	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (len < sizeof(struct sctp_assoc_ids))
		return -EINVAL;

	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		num++;
	}

	if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
		return -EINVAL;

	len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;

	ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
	if (unlikely(!ids))
		return -ENOMEM;

	ids->gaids_number_of_ids = num;
	num = 0;
	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		ids->gaids_assoc_id[num++] = asoc->assoc_id;
	}

	if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
		kfree(ids);
		return -EFAULT;
	}

	kfree(ids);
	return 0;
}
/*
 * SCTP_PEER_ADDR_THLDS
 *
 * This option allows us to fetch the partially failed threshold for one or all
 * transports in an association.  See Section 6.1 of:
 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
 */
static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
					    char __user *optval,
					    int len,
					    int __user *optlen)
{
	struct sctp_paddrthlds val;
	struct sctp_transport *trans;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_paddrthlds))
		return -EINVAL;
	len = sizeof(struct sctp_paddrthlds);
	if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
		return -EFAULT;

	if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
		asoc = sctp_id2assoc(sk, val.spt_assoc_id);
		if (!asoc)
			return -ENOENT;

		val.spt_pathpfthld = asoc->pf_retrans;
		val.spt_pathmaxrxt = asoc->pathmaxrxt;
	} else {
		trans = sctp_addr_id2transport(sk, &val.spt_address,
					       val.spt_assoc_id);
		if (!trans)
			return -ENOENT;

		val.spt_pathmaxrxt = trans->pathmaxrxt;
		val.spt_pathpfthld = trans->pf_retrans;
	}

	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * SCTP_GET_ASSOC_STATS
 *
 * This option retrieves local per endpoint statistics. It is modeled
 * after OpenSolaris' implementation
 */
static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_assoc_stats sas;
	struct sctp_association *asoc = NULL;

	/* User must provide at least the assoc id */
	if (len < sizeof(sctp_assoc_t))
		return -EINVAL;

	/* Allow the struct to grow and fill in as much as possible */
	len = min_t(size_t, len, sizeof(sas));

	if (copy_from_user(&sas, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
	if (!asoc)
		return -EINVAL;

	sas.sas_rtxchunks = asoc->stats.rtxchunks;
	sas.sas_gapcnt = asoc->stats.gapcnt;
	sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
	sas.sas_osacks = asoc->stats.osacks;
	sas.sas_isacks = asoc->stats.isacks;
	sas.sas_octrlchunks = asoc->stats.octrlchunks;
	sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
	sas.sas_oodchunks = asoc->stats.oodchunks;
	sas.sas_iodchunks = asoc->stats.iodchunks;
	sas.sas_ouodchunks = asoc->stats.ouodchunks;
	sas.sas_iuodchunks = asoc->stats.iuodchunks;
	sas.sas_idupchunks = asoc->stats.idupchunks;
	sas.sas_opackets = asoc->stats.opackets;
	sas.sas_ipackets = asoc->stats.ipackets;

	/* New high max rto observed, will return 0 if not a single
	 * RTO update took place. obs_rto_ipaddr will be bogus
	 * in such a case
	 */
	sas.sas_maxrto = asoc->stats.max_obs_rto;
	memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
	       sizeof(struct sockaddr_storage));

	/* Mark beginning of a new observation period */
	asoc->stats.max_obs_rto = asoc->rto_min;

	if (put_user(len, optlen))
		return -EFAULT;

	pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);

	if (copy_to_user(optval, &sas, len))
		return -EFAULT;

	return 0;
}
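/* Example (user space, illustrative only): as with the thresholds option,
 * the statistics option wants the association id copied in first; the rest
 * of struct sctp_assoc_stats is output only.  "assoc_id" is a placeholder
 * for an existing association id.
 *
 *	struct sctp_assoc_stats sas;
 *	socklen_t optlen = sizeof(sas);
 *
 *	memset(&sas, 0, sizeof(sas));
 *	sas.sas_assoc_id = assoc_id;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS,
 *		       &sas, &optlen) == 0)
 *		printf("packets out %llu, in %llu, max observed RTO %llu\n",
 *		       (unsigned long long)sas.sas_opackets,
 *		       (unsigned long long)sas.sas_ipackets,
 *		       (unsigned long long)sas.sas_maxrto);
 */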
static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->recvrcvinfo)
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->recvnxtinfo)
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
static int sctp_getsockopt_pr_supported(struct sock *sk, int len,
					char __user *optval,
					int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (asoc) {
		params.assoc_value = asoc->prsctp_enable;
	} else if (!params.assoc_id) {
		struct sctp_sock *sp = sctp_sk(sk);

		params.assoc_value = sp->ep->prsctp_enable;
	} else {
		retval = -EINVAL;
		goto out;
	}

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}
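/* Example (user space, illustrative only): like most of the newer options,
 * SCTP_PR_SUPPORTED uses struct sctp_assoc_value.  Passing an assoc_id of 0
 * on a one-to-many socket hits the "else if (!params.assoc_id)" branch above
 * and reads the endpoint-wide setting rather than a specific association.
 *
 *	struct sctp_assoc_value av;
 *	socklen_t optlen = sizeof(av);
 *
 *	memset(&av, 0, sizeof(av));		// assoc_id == 0: endpoint value
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PR_SUPPORTED,
 *		       &av, &optlen) == 0)
 *		printf("PR-SCTP %s\n", av.assoc_value ? "enabled" : "disabled");
 */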
static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
					  char __user *optval,
					  int __user *optlen)
{
	struct sctp_default_prinfo info;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(info)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(info);
	if (copy_from_user(&info, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, info.pr_assoc_id);
	if (asoc) {
		info.pr_policy = SCTP_PR_POLICY(asoc->default_flags);
		info.pr_value = asoc->default_timetolive;
	} else if (!info.pr_assoc_id) {
		struct sctp_sock *sp = sctp_sk(sk);

		info.pr_policy = SCTP_PR_POLICY(sp->default_flags);
		info.pr_value = sp->default_timetolive;
	} else {
		retval = -EINVAL;
		goto out;
	}

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &info, len))
		goto out;

	retval = 0;

out:
	return retval;
}
static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
					  char __user *optval,
					  int __user *optlen)
{
	struct sctp_prstatus params;
	struct sctp_association *asoc;
	int policy;
	int retval = -EINVAL;

	if (len < sizeof(params))
		goto out;

	len = sizeof(params);
	if (copy_from_user(&params, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	policy = params.sprstat_policy;
	if (policy & ~SCTP_PR_SCTP_MASK)
		goto out;

	asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
	if (!asoc)
		goto out;

	if (policy == SCTP_PR_SCTP_NONE) {
		params.sprstat_abandoned_unsent = 0;
		params.sprstat_abandoned_sent = 0;
		for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
			params.sprstat_abandoned_unsent +=
					asoc->abandoned_unsent[policy];
			params.sprstat_abandoned_sent +=
					asoc->abandoned_sent[policy];
		}
	} else {
		params.sprstat_abandoned_unsent =
			asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)];
		params.sprstat_abandoned_sent =
			asoc->abandoned_sent[__SCTP_PR_INDEX(policy)];
	}

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, &params, len)) {
		retval = -EFAULT;
		goto out;
	}

	retval = 0;

out:
	return retval;
}
static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
{
	struct sctp_stream_out_ext *streamoute;
	struct sctp_association *asoc;
	struct sctp_prstatus params;
	int retval = -EINVAL;
	int policy;

	if (len < sizeof(params))
		goto out;

	len = sizeof(params);
	if (copy_from_user(&params, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	policy = params.sprstat_policy;
	if (policy & ~SCTP_PR_SCTP_MASK)
		goto out;

	asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
	if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
		goto out;

	streamoute = asoc->stream.out[params.sprstat_sid].ext;
	if (!streamoute) {
		/* Not allocated yet, means all stats are 0 */
		params.sprstat_abandoned_unsent = 0;
		params.sprstat_abandoned_sent = 0;
		retval = 0;
		goto out;
	}

	if (policy == SCTP_PR_SCTP_NONE) {
		params.sprstat_abandoned_unsent = 0;
		params.sprstat_abandoned_sent = 0;
		for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
			params.sprstat_abandoned_unsent +=
				streamoute->abandoned_unsent[policy];
			params.sprstat_abandoned_sent +=
				streamoute->abandoned_sent[policy];
		}
	} else {
		params.sprstat_abandoned_unsent =
			streamoute->abandoned_unsent[__SCTP_PR_INDEX(policy)];
		params.sprstat_abandoned_sent =
			streamoute->abandoned_sent[__SCTP_PR_INDEX(policy)];
	}

	if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
		retval = -EFAULT;
		goto out;
	}

	retval = 0;

out:
	return retval;
}
static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
					      char __user *optval,
					      int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (asoc) {
		params.assoc_value = asoc->reconf_enable;
	} else if (!params.assoc_id) {
		struct sctp_sock *sp = sctp_sk(sk);

		params.assoc_value = sp->ep->reconf_enable;
	} else {
		retval = -EINVAL;
		goto out;
	}

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}
static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (asoc) {
		params.assoc_value = asoc->strreset_enable;
	} else if (!params.assoc_id) {
		struct sctp_sock *sp = sctp_sk(sk);

		params.assoc_value = sp->ep->strreset_enable;
	} else {
		retval = -EINVAL;
		goto out;
	}

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}
static int sctp_getsockopt_scheduler(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc) {
		retval = -EINVAL;
		goto out;
	}

	params.assoc_value = sctp_sched_get_sched(asoc);

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}
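/* Example (user space, illustrative only): SCTP_STREAM_SCHEDULER reports the
 * stream scheduler of one association via struct sctp_assoc_value; the value
 * returned is one of the sctp_sched_type constants from the uapi header
 * (for instance SCTP_SS_FCFS or SCTP_SS_PRIO).  "assoc_id" is a placeholder
 * for an existing association id.
 *
 *	struct sctp_assoc_value av;
 *	socklen_t optlen = sizeof(av);
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = assoc_id;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
 *		       &av, &optlen) == 0)
 *		printf("scheduler: %u\n", av.assoc_value);
 */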
static int sctp_getsockopt_scheduler_value(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
{
	struct sctp_stream_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc) {
		retval = -EINVAL;
		goto out;
	}

	retval = sctp_sched_get_value(asoc, params.stream_id,
				      &params.stream_value);
	if (retval)
		goto out;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, &params, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}
static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
						  char __user *optval,
						  int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (asoc) {
		params.assoc_value = asoc->intl_enable;
	} else if (!params.assoc_id) {
		struct sctp_sock *sp = sctp_sk(sk);

		params.assoc_value = sp->strm_interleave;
	} else {
		retval = -EINVAL;
		goto out;
	}

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}
static int sctp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	int retval = 0;
	int len;

	pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);

	/* I can hardly begin to describe how wrong this is.  This is
	 * so broken as to be worse than useless.  The API draft
	 * REALLY is NOT helpful here...   I am not convinced that the
	 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
	 * are at all well-founded.
	 */
	if (level != SOL_SCTP) {
		struct sctp_af *af = sctp_sk(sk)->pf->af;

		retval = af->getsockopt(sk, level, optname, optval, optlen);
		return retval;
	}

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	lock_sock(sk);

	switch (optname) {
	case SCTP_STATUS:
		retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
		break;
	case SCTP_DISABLE_FRAGMENTS:
		retval = sctp_getsockopt_disable_fragments(sk, len, optval,
							   optlen);
		break;
	case SCTP_EVENTS:
		retval = sctp_getsockopt_events(sk, len, optval, optlen);
		break;
	case SCTP_AUTOCLOSE:
		retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
		break;
	case SCTP_SOCKOPT_PEELOFF:
		retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
		break;
	case SCTP_SOCKOPT_PEELOFF_FLAGS:
		retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen);
		break;
	case SCTP_PEER_ADDR_PARAMS:
		retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
							  optlen);
		break;
	case SCTP_DELAYED_SACK:
		retval = sctp_getsockopt_delayed_ack(sk, len, optval,
						     optlen);
		break;
	case SCTP_INITMSG:
		retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
		break;
	case SCTP_GET_PEER_ADDRS:
		retval = sctp_getsockopt_peer_addrs(sk, len, optval,
						    optlen);
		break;
	case SCTP_GET_LOCAL_ADDRS:
		retval = sctp_getsockopt_local_addrs(sk, len, optval,
						     optlen);
		break;
	case SCTP_SOCKOPT_CONNECTX3:
		retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
		break;
	case SCTP_DEFAULT_SEND_PARAM:
		retval = sctp_getsockopt_default_send_param(sk, len,
							    optval, optlen);
		break;
	case SCTP_DEFAULT_SNDINFO:
		retval = sctp_getsockopt_default_sndinfo(sk, len,
							 optval, optlen);
		break;
	case SCTP_PRIMARY_ADDR:
		retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
		break;
	case SCTP_NODELAY:
		retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
		break;
	case SCTP_RTOINFO:
		retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
		break;
	case SCTP_ASSOCINFO:
		retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
		break;
	case SCTP_I_WANT_MAPPED_V4_ADDR:
		retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
		break;
	case SCTP_MAXSEG:
		retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
		break;
	case SCTP_GET_PEER_ADDR_INFO:
		retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
							optlen);
		break;
	case SCTP_ADAPTATION_LAYER:
		retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
							  optlen);
		break;
	case SCTP_CONTEXT:
		retval = sctp_getsockopt_context(sk, len, optval, optlen);
		break;
	case SCTP_FRAGMENT_INTERLEAVE:
		retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
							     optlen);
		break;
	case SCTP_PARTIAL_DELIVERY_POINT:
		retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
								optlen);
		break;
	case SCTP_MAX_BURST:
		retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
		break;
	case SCTP_AUTH_KEY:
	case SCTP_AUTH_CHUNK:
	case SCTP_AUTH_DELETE_KEY:
		retval = -EOPNOTSUPP;
		break;
	case SCTP_HMAC_IDENT:
		retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
		break;
	case SCTP_AUTH_ACTIVE_KEY:
		retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
		break;
	case SCTP_PEER_AUTH_CHUNKS:
		retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
							  optlen);
		break;
	case SCTP_LOCAL_AUTH_CHUNKS:
		retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
							   optlen);
		break;
	case SCTP_GET_ASSOC_NUMBER:
		retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
		break;
	case SCTP_GET_ASSOC_ID_LIST:
		retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
		break;
	case SCTP_AUTO_ASCONF:
		retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
		break;
	case SCTP_PEER_ADDR_THLDS:
		retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
		break;
	case SCTP_GET_ASSOC_STATS:
		retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
		break;
	case SCTP_RECVRCVINFO:
		retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
		break;
	case SCTP_RECVNXTINFO:
		retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
		break;
	case SCTP_PR_SUPPORTED:
		retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen);
		break;
	case SCTP_DEFAULT_PRINFO:
		retval = sctp_getsockopt_default_prinfo(sk, len, optval,
							optlen);
		break;
	case SCTP_PR_ASSOC_STATUS:
		retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
							optlen);
		break;
	case SCTP_PR_STREAM_STATUS:
		retval = sctp_getsockopt_pr_streamstatus(sk, len, optval,
							 optlen);
		break;
	case SCTP_RECONFIG_SUPPORTED:
		retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
							    optlen);
		break;
	case SCTP_ENABLE_STREAM_RESET:
		retval = sctp_getsockopt_enable_strreset(sk, len, optval,
							 optlen);
		break;
	case SCTP_STREAM_SCHEDULER:
		retval = sctp_getsockopt_scheduler(sk, len, optval,
						   optlen);
		break;
	case SCTP_STREAM_SCHEDULER_VALUE:
		retval = sctp_getsockopt_scheduler_value(sk, len, optval,
							 optlen);
		break;
	case SCTP_INTERLEAVING_SUPPORTED:
		retval = sctp_getsockopt_interleaving_supported(sk, len,
								optval, optlen);
		break;
	default:
		retval = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return retval;
}
static int sctp_hash(struct sock *sk)
{
	/* STUB */
	return 0;
}

static void sctp_unhash(struct sock *sk)
{
	/* STUB */
}

/* Check if port is acceptable.  Possibly find first available port.
 *
 * The port hash table (contained in the 'global' SCTP protocol storage
 * returned by struct sctp_protocol *sctp_get_protocol()). The hash
 * table is an array of 4096 lists (sctp_bind_hashbucket). Each
 * list (the list number is the port number hashed out, so as you
 * would expect from a hash function, all the ports in a given list have
 * such a number that hashes out to the same list number; you were
 * expecting that, right?); so each list has a set of ports, with a
 * link to the socket (struct sock) that uses it, the port number and
 * a fastreuse flag (FIXME: NPI ipg).
 */
static struct sctp_bind_bucket *sctp_bucket_create(
	struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
	struct sctp_bind_hashbucket *head; /* hash list */
	struct sctp_bind_bucket *pp;
	unsigned short snum;
	int ret;

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: begins, snum:%d\n", __func__, snum);

	local_bh_disable();

	if (snum == 0) {
		/* Search for an available port. */
		int low, high, remaining, index;
		unsigned int rover;
		struct net *net = sock_net(sk);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;
		rover = prandom_u32() % remaining + low;

		do {
			rover++;
			if ((rover < low) || (rover > high))
				rover = low;
			if (inet_is_local_reserved_port(net, rover))
				continue;
			index = sctp_phashfn(sock_net(sk), rover);
			head = &sctp_port_hashtable[index];
			spin_lock(&head->lock);
			sctp_for_each_hentry(pp, &head->chain)
				if ((pp->port == rover) &&
				    net_eq(sock_net(sk), pp->net))
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
		} while (--remaining > 0);

		/* Exhausted local port range during search? */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use.  HEAD (the port
		 * hash table list entry) is non-NULL and we hold its
		 * lock.
		 */
		snum = rover;
	} else {
		/* We are given a specific port number; we verify
		 * that it is not being used. If it is used, we will
		 * exhaust the search in the hash list corresponding
		 * to the port number (snum) - we detect that with the
		 * port iterator, pp being NULL.
		 */
		head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
		spin_lock(&head->lock);
		sctp_for_each_hentry(pp, &head->chain) {
			if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
				goto pp_found;
		}
	}
	pp = NULL;
	goto pp_not_found;
pp_found:
	if (!hlist_empty(&pp->owner)) {
		/* We had a port hash table hit - there is an
		 * available port (pp != NULL) and it is being
		 * used by other socket (pp->owner not empty); that other
		 * socket is going to be sk2.
		 */
		int reuse = sk->sk_reuse;
		struct sock *sk2;

		pr_debug("%s: found a possible match\n", __func__);

		if (pp->fastreuse && sk->sk_reuse &&
		    sk->sk_state != SCTP_SS_LISTENING)
			goto success;

		/* Run through the list of sockets bound to the port
		 * (pp->port) [via the pointers bind_next and
		 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
		 * we get the endpoint they describe and run through
		 * the endpoint's list of IP (v4 or v6) addresses,
		 * comparing each of the addresses with the address of
		 * the socket sk. If we find a match, then that means
		 * that this port/socket (sk) combination are already
		 * in an endpoint.
		 */
		sk_for_each_bound(sk2, &pp->owner) {
			struct sctp_endpoint *ep2;
			ep2 = sctp_sk(sk2)->ep;

			if (sk == sk2 ||
			    (reuse && sk2->sk_reuse &&
			     sk2->sk_state != SCTP_SS_LISTENING))
				continue;

			if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
						    sctp_sk(sk2), sctp_sk(sk))) {
				ret = (long)sk2;
				goto fail_unlock;
			}
		}

		pr_debug("%s: found a match\n", __func__);
	}
pp_not_found:
	/* If there was a hash table miss, create a new port.  */
	ret = 1;
	if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
		goto fail_unlock;

	/* In either case (hit or miss), make sure fastreuse is 1 only
	 * if sk->sk_reuse is too (that is, if the caller requested
	 * SO_REUSEADDR on this socket -sk-).
	 */
	if (hlist_empty(&pp->owner)) {
		if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
			pp->fastreuse = 1;
		else
			pp->fastreuse = 0;
	} else if (pp->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
		pp->fastreuse = 0;

	/* We are set, so fill up all the data in the hash table
	 * entry, tie the socket list information with the rest of the
	 * sockets FIXME: Blurry, NPI (ipg).
	 */
success:
	if (!sctp_sk(sk)->bind_hash) {
		inet_sk(sk)->inet_num = snum;
		sk_add_bind_node(sk, &pp->owner);
		sctp_sk(sk)->bind_hash = pp;
	}
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);

fail:
	local_bh_enable();
	return ret;
}
/* Assign a 'snum' port to the socket.  If snum == 0, an ephemeral
 * port is requested.
 */
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
	union sctp_addr addr;
	struct sctp_af *af = sctp_sk(sk)->pf->af;

	/* Set up a dummy address struct from the sk. */
	af->from_sk(&addr, sk);
	addr.v4.sin_port = htons(snum);

	/* Note: sk->sk_num gets filled in if ephemeral port request. */
	return !!sctp_get_port_local(sk, &addr);
}
/*
 *  Move a socket to LISTENING state.
 */
static int sctp_listen_start(struct sock *sk, int backlog)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct crypto_shash *tfm = NULL;
	char alg[32];

	/* Allocate HMAC for generating cookie. */
	if (!sp->hmac && sp->sctp_hmac_alg) {
		sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
		tfm = crypto_alloc_shash(alg, 0, 0);
		if (IS_ERR(tfm)) {
			net_info_ratelimited("failed to load transform for %s: %ld\n",
					     sp->sctp_hmac_alg, PTR_ERR(tfm));
			return -ENOSYS;
		}
		sctp_sk(sk)->hmac = tfm;
	}

	/*
	 * If a bind() or sctp_bindx() is not called prior to a listen()
	 * call that allows new associations to be accepted, the system
	 * picks an ephemeral port and will choose an address set equivalent
	 * to binding with a wildcard address.
	 *
	 * This is not currently spelled out in the SCTP sockets
	 * extensions draft, but follows the practice as seen in TCP
	 * sockets.
	 */
	inet_sk_set_state(sk, SCTP_SS_LISTENING);
	if (!ep->base.bind_addr.port) {
		if (sctp_autobind(sk))
			return -EAGAIN;
	} else {
		if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
			inet_sk_set_state(sk, SCTP_SS_CLOSED);
			return -EADDRINUSE;
		}
	}

	sk->sk_max_ack_backlog = backlog;
	sctp_hash_endpoint(ep);
	return 0;
}
/*
 * 4.1.3 / 5.1.3 listen()
 *
 *   By default, new associations are not accepted for UDP style sockets.
 *   An application uses listen() to mark a socket as being able to
 *   accept new associations.
 *
 *   On TCP style sockets, applications use listen() to ready the SCTP
 *   endpoint for accepting inbound associations.
 *
 *   On both types of endpoints a backlog of '0' disables listening.
 *
 *  Move a socket to LISTENING state.
 */
int sctp_inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	int err = -EINVAL;

	if (unlikely(backlog < 0))
		return err;

	lock_sock(sk);

	/* Peeled-off sockets are not allowed to listen().  */
	if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
		goto out;

	if (sock->state != SS_UNCONNECTED)
		goto out;

	if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
		goto out;

	/* If backlog is zero, disable listening. */
	if (!backlog) {
		if (sctp_sstate(sk, CLOSED))
			goto out;

		err = 0;
		sctp_unhash_endpoint(ep);
		sk->sk_state = SCTP_SS_CLOSED;
		if (sk->sk_reuse)
			sctp_sk(sk)->bind_hash->fastreuse = 1;
		goto out;
	}

	/* If we are already listening, just update the backlog */
	if (sctp_sstate(sk, LISTENING))
		sk->sk_max_ack_backlog = backlog;
	else {
		err = sctp_listen_start(sk, backlog);
		if (err)
			goto out;
	}

	err = 0;
out:
	release_sock(sk);
	return err;
}
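/* Example (user space, illustrative only; the port number is arbitrary): on
 * a one-to-many socket a non-zero backlog simply enables acceptance of new
 * associations, and a backlog of 0 turns it off again, per the rules above.
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(5000),
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *	listen(fd, 5);		// start accepting new associations
 *	...
 *	listen(fd, 0);		// stop accepting new associations
 */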
/*
 * This function is done by modeling the current datagram_poll() and the
 * tcp_poll().  Note that, based on these implementations, we don't
 * lock the socket in this function, even though it seems that,
 * ideally, locking or some other mechanisms can be used to ensure
 * the integrity of the counters (sndbuf and wmem_alloc) used
 * in this place.  We assume that we don't need locks either until proven
 * otherwise.
 *
 * Another thing to note is that we include the Async I/O support
 * here, again, by modeling the current TCP/UDP code.  We don't have
 * a good way to test with it yet.
 */
__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct sctp_sock *sp = sctp_sk(sk);
	__poll_t mask;

	poll_wait(file, sk_sleep(sk), wait);

	sock_rps_record_flow(sk);

	/* A TCP-style listening socket becomes readable when the accept queue
	 * is not empty.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		return (!list_empty(&sp->ep->asocs)) ?
			(EPOLLIN | EPOLLRDNORM) : 0;

	mask = 0;

	/* Is there any exceptional events?  */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* Is it readable?  Reconsider this code with TCP-style support.  */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* The association is either gone or not ready.  */
	if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
		return mask;

	/* Is it writable?  */
	if (sctp_writeable(sk)) {
		mask |= EPOLLOUT | EPOLLWRNORM;
	} else {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		/*
		 * Since the socket is not locked, the buffer
		 * might be made available after the writeable check and
		 * before the bit is set.  This could cause a lost I/O
		 * signal.  tcp_poll() has a race breaker for this race
		 * condition.  Based on their implementation, we put
		 * in the following code to cover it as well.
		 */
		if (sctp_writeable(sk))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	return mask;
}
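/* Example (user space, illustrative only): the mask computed above is what a
 * poll()/select()/epoll caller observes; a minimal readiness check looks
 * like this.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)
 *			;	// a message (or new association) is readable
 *		if (pfd.revents & POLLOUT)
 *			;	// sndbuf space available, sendmsg() won't block
 *	}
 */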
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

static struct sctp_bind_bucket *sctp_bucket_create(
	struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
{
	struct sctp_bind_bucket *pp;

	pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
	if (pp) {
		SCTP_DBG_OBJCNT_INC(bind_bucket);
		pp->port = snum;
		pp->fastreuse = 0;
		INIT_HLIST_HEAD(&pp->owner);
		pp->net = net;
		hlist_add_head(&pp->node, &head->chain);
	}
	return pp;
}
/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
	if (pp && hlist_empty(&pp->owner)) {
		__hlist_del(&pp->node);
		kmem_cache_free(sctp_bucket_cachep, pp);
		SCTP_DBG_OBJCNT_DEC(bind_bucket);
	}
}
/* Release this socket's reference to a local port.  */
static inline void __sctp_put_port(struct sock *sk)
{
	struct sctp_bind_hashbucket *head =
		&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
						  inet_sk(sk)->inet_num)];
	struct sctp_bind_bucket *pp;

	spin_lock(&head->lock);
	pp = sctp_sk(sk)->bind_hash;
	__sk_del_bind_node(sk);
	sctp_sk(sk)->bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	sctp_bucket_destroy(pp);
	spin_unlock(&head->lock);
}

void sctp_put_port(struct sock *sk)
{
	local_bh_disable();
	__sctp_put_port(sk);
	local_bh_enable();
}
/*
 * The system picks an ephemeral port and chooses an address set equivalent
 * to binding with a wildcard address.
 * One of those addresses will be the primary address for the association.
 * This automatically enables the multihoming capability of SCTP.
 */
static int sctp_autobind(struct sock *sk)
{
	union sctp_addr autoaddr;
	struct sctp_af *af;
	__be16 port;

	/* Initialize a local sockaddr structure to INADDR_ANY. */
	af = sctp_sk(sk)->pf->af;

	port = htons(inet_sk(sk)->inet_num);
	af->inaddr_any(&autoaddr, port);

	return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
}
/* Parse out IPPROTO_SCTP CMSG headers.  Perform only minimal validation.
 *
 * From RFC 2292
 * 4.2 The cmsghdr Structure *
 *
 * When ancillary data is sent or received, any number of ancillary data
 * objects can be specified by the msg_control and msg_controllen members of
 * the msghdr structure, because each object is preceded by
 * a cmsghdr structure defining the object's length (the cmsg_len member).
 * Historically Berkeley-derived implementations have passed only one object
 * at a time, but this API allows multiple objects to be
 * passed in a single call to sendmsg() or recvmsg().  The following example
 * shows two ancillary data objects in a control buffer.
 *
 *   |<--------------------------- msg_controllen -------------------------->|
 *   |                                                                       |
 *
 *   |<----- ancillary data object ----->|<----- ancillary data object ----->|
 *
 *   |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
 *   |                                   |                                   |
 *
 *   |<---------- cmsg_len ---------->|  |<--------- cmsg_len ----------->|  |
 *
 *   |<--------- CMSG_LEN() --------->|  |<-------- CMSG_LEN() ---------->|  |
 *   |                                |  |                                |  |
 *
 *   +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 *   |cmsg_|cmsg_|cmsg_|XX|           |XX|cmsg_|cmsg_|cmsg_|XX|           |XX|
 *   |len  |level|type |XX|cmsg_data[]|XX|len  |level|type |XX|cmsg_data[]|XX|
 *   +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 *
 *   msg_control points here
 */
static int sctp_msghdr_parse(const struct msghdr *msg, struct sctp_cmsgs *cmsgs)
{
	struct msghdr *my_msg = (struct msghdr *)msg;
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, my_msg) {
		if (!CMSG_OK(my_msg, cmsg))
			return -EINVAL;

		/* Should we parse this header or ignore?  */
		if (cmsg->cmsg_level != IPPROTO_SCTP)
			continue;

		/* Strictly check lengths following example in SCM code.  */
		switch (cmsg->cmsg_type) {
		case SCTP_INIT:
			/* SCTP Socket API Extension
			 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
			 *
			 * This cmsghdr structure provides information for
			 * initializing new SCTP associations with sendmsg().
			 * The SCTP_INITMSG socket option uses this same data
			 * structure.  This structure is not used for
			 * recvmsg().
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ----------------------
			 * IPPROTO_SCTP  SCTP_INIT      struct sctp_initmsg
			 */
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
				return -EINVAL;

			cmsgs->init = CMSG_DATA(cmsg);
			break;

		case SCTP_SNDRCV:
			/* SCTP Socket API Extension
			 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
			 *
			 * This cmsghdr structure specifies SCTP options for
			 * sendmsg() and describes SCTP header information
			 * about a received message through recvmsg().
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ----------------------
			 * IPPROTO_SCTP  SCTP_SNDRCV    struct sctp_sndrcvinfo
			 */
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
				return -EINVAL;

			cmsgs->srinfo = CMSG_DATA(cmsg);

			if (cmsgs->srinfo->sinfo_flags &
			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
			      SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
			      SCTP_ABORT | SCTP_EOF))
				return -EINVAL;
			break;

		case SCTP_SNDINFO:
			/* SCTP Socket API Extension
			 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
			 *
			 * This cmsghdr structure specifies SCTP options for
			 * sendmsg(). This structure and SCTP_RCVINFO replaces
			 * SCTP_SNDRCV which has been deprecated.
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ---------------------
			 * IPPROTO_SCTP  SCTP_SNDINFO   struct sctp_sndinfo
			 */
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
				return -EINVAL;

			cmsgs->sinfo = CMSG_DATA(cmsg);

			if (cmsgs->sinfo->snd_flags &
			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
			      SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
			      SCTP_ABORT | SCTP_EOF))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
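/* Example (user space, illustrative only; the stream id is arbitrary):
 * building the SCTP_SNDINFO ancillary data object that this parser accepts
 * and handing it to sendmsg().
 *
 *	char buf[] = "hello";
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))] = { 0 };
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	struct sctp_sndinfo *sinfo;
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *	sinfo = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
 *	memset(sinfo, 0, sizeof(*sinfo));
 *	sinfo->snd_sid = 1;
 *	sendmsg(fd, &msg, 0);
 */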
/* Wait for a packet..
 * Note: This function is the same function as in core/datagram.c
 * with a few modifications to make lksctp work.
 */
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto ready;

	/* Socket shut down?  */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out;

	/* Sequenced packets can come disconnected.  If so we report the
	 * problem.
	 */
	error = -ENOTCONN;

	/* Is there a good reason to think that we may receive some data?  */
	if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
		goto out;

	/* Handle signals.  */
	if (signal_pending(current))
		goto interrupted;

	/* Let another process have a go.  Since we are going to sleep
	 * anyway.  Note: This may cause odd behaviors if the message
	 * does not fit in the user's buffer, but this seems to be the
	 * only way to honor MSG_DONTWAIT realistically.
	 */
	release_sock(sk);
	*timeo_p = schedule_timeout(*timeo_p);
	lock_sock(sk);

ready:
	finish_wait(sk_sleep(sk), &wait);
	return 0;

interrupted:
	error = sock_intr_errno(*timeo_p);

out:
	finish_wait(sk_sleep(sk), &wait);
	*err = error;
	return error;
}
/* Receive a datagram.
 * Note: This is pretty much the same routine as in core/datagram.c
 * with a few changes to make lksctp work.
 */
struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
				       int noblock, int *err)
{
	int error;
	struct sk_buff *skb;
	long timeo;

	timeo = sock_rcvtimeo(sk, noblock);

	pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
		 MAX_SCHEDULE_TIMEOUT);

	do {
		/* Again only user level code calls this function,
		 * so nothing interrupt level
		 * will suddenly eat the receive_queue.
		 *
		 *  Look at current nfs client by the way...
		 *  However, this function was correct in any case. 8)
		 */
		if (flags & MSG_PEEK) {
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				refcount_inc(&skb->users);
		} else {
			skb = __skb_dequeue(&sk->sk_receive_queue);
		}

		if (skb)
			return skb;

		/* Caller is allowed not to check sk->sk_err before calling. */
		error = sock_error(sk);
		if (error)
			goto no_packet;

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;

		if (sk_can_busy_loop(sk)) {
			sk_busy_loop(sk, noblock);

			if (!skb_queue_empty(&sk->sk_receive_queue))
				continue;
		}

		/* User doesn't want to wait.  */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;
	} while (sctp_wait_for_packet(sk, err, &timeo) == 0);

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
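/* Example (user space, illustrative only): the -EAGAIN path above is what a
 * non-blocking receiver observes when nothing is queued yet.
 *
 *	char buf[1024];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(fd, &msg, MSG_DONTWAIT);
 *
 *	if (n < 0 && errno == EAGAIN)
 *		;	// no message queued yet; retry later or use poll()
 */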
/* If sndbuf has changed, wake up per association sndbuf waiters.  */
static void __sctp_write_space(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	if (sctp_wspace(asoc) <= 0)
		return;

	if (waitqueue_active(&asoc->wait))
		wake_up_interruptible(&asoc->wait);

	if (sctp_writeable(sk)) {
		struct socket_wq *wq;

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (wq) {
			if (waitqueue_active(&wq->wait))
				wake_up_interruptible(&wq->wait);

			/* Note that we try to include the Async I/O support
			 * here by modeling from the current TCP/UDP code.
			 * We have not tested with it yet.
			 */
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		}
		rcu_read_unlock();
	}
}
static void sctp_wake_up_waiters(struct sock *sk,
				 struct sctp_association *asoc)
{
	struct sctp_association *tmp = asoc;

	/* We do accounting for the sndbuf space per association,
	 * so we only need to wake our own association.
	 */
	if (asoc->ep->sndbuf_policy)
		return __sctp_write_space(asoc);

	/* If association goes down and is just flushing its
	 * outq, then just normally notify others.
	 */
	if (asoc->base.dead)
		return sctp_write_space(sk);

	/* Accounting for the sndbuf space is per socket, so we
	 * need to wake up others, try to be fair and in case of
	 * other associations, let them have a go first instead
	 * of just doing a sctp_write_space() call.
	 *
	 * Note that we reach sctp_wake_up_waiters() only when
	 * associations free up queued chunks, thus we are under
	 * lock and the list of associations on a socket is
	 * guaranteed not to change.
	 */
	for (tmp = list_next_entry(tmp, asocs); 1;
	     tmp = list_next_entry(tmp, asocs)) {
		/* Manually skip the head element. */
		if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
			continue;
		/* Wake up association. */
		__sctp_write_space(tmp);
		/* We've reached the end. */
		if (tmp == asoc)
			break;
	}
}
/* Do accounting for the sndbuf space.
 * Decrement the used sndbuf space of the corresponding association by the
 * data size which was just transmitted(freed).
 */
static void sctp_wfree(struct sk_buff *skb)
{
	struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));

	/*
	 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
	 */
	sk->sk_wmem_queued   -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);

	sock_wfree(skb);
	sctp_wake_up_waiters(sk, asoc);

	sctp_association_put(asoc);
}
/* Do accounting for the receive space on the socket.
 * Accounting for the association is done in ulpevent.c
 * We set this as a destructor for the cloned data skbs so that
 * accounting is done at the correct time.
 */
void sctp_sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct sctp_ulpevent *event = sctp_skb2event(skb);

	atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);

	/*
	 * Mimic the behavior of sock_rfree
	 */
	sk_mem_uncharge(sk, event->rmem_len);
}
/* Helper function to wait for space in the sndbuf.  */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len)
{
	struct sock *sk = asoc->base.sk;
	int err = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
		 *timeo_p, msg_len);

	/* Increment the association's refcnt.  */
	sctp_association_hold(asoc);

	/* Wait on the association specific sndbuf space. */
	for (;;) {
		prepare_to_wait_exclusive(&asoc->wait, &wait,
					  TASK_INTERRUPTIBLE);
		if (asoc->base.dead)
			goto do_dead;
		if (!*timeo_p)
			goto do_nonblock;
		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;
		if (msg_len <= sctp_wspace(asoc))
			break;

		/* Let another process have a go.  Since we are going
		 * to sleep anyway.
		 */
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);
		if (sk != asoc->base.sk)
			goto do_error;

		*timeo_p = current_timeo;
	}

out:
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt.  */
	sctp_association_put(asoc);

	return err;

do_dead:
	err = -ESRCH;
	goto out;

do_error:
	err = -EPIPE;
	goto out;

do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;

do_nonblock:
	err = -EAGAIN;
	goto out;
}
void sctp_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
/* If socket sndbuf has changed, wake up all per association waiters.  */
void sctp_write_space(struct sock *sk)
{
	struct sctp_association *asoc;

	/* Wake up the tasks in each wait queue.  */
	list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
		__sctp_write_space(asoc);
	}
}
/* Is there any sndbuf space available on the socket?
 *
 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
 * associations on the same socket. For a UDP-style socket with
 * multiple associations, it is possible for it to be "unwriteable"
 * prematurely.  I assume that this is acceptable because
 * a premature "unwriteable" is better than an accidental "writeable" which
 * would cause an unwanted block under certain circumstances.  For the 1-1
 * UDP-style sockets or TCP-style sockets, this code should work.
 */
static int sctp_writeable(struct sock *sk)
{
	int amt = 0;

	amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
	if (amt < 0)
		amt = 0;
	return amt;
}
/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
 * returns immediately with EINPROGRESS.
 */
static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
{
	struct sock *sk = asoc->base.sk;
	int err = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);

	/* Increment the association's refcnt.  */
	sctp_association_hold(asoc);

	for (;;) {
		prepare_to_wait_exclusive(&asoc->wait, &wait,
					  TASK_INTERRUPTIBLE);
		if (!*timeo_p)
			goto do_nonblock;
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
		    asoc->base.dead)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;

		if (sctp_state(asoc, ESTABLISHED))
			break;

		/* Let another process have a go.  Since we are going
		 * to sleep anyway.
		 */
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);

		*timeo_p = current_timeo;
	}

out:
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt.  */
	sctp_association_put(asoc);

	return err;

do_error:
	if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
		err = -ETIMEDOUT;
	else
		err = -ECONNREFUSED;
	goto out;

do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;

do_nonblock:
	err = -EINPROGRESS;
	goto out;
}
static int sctp_wait_for_accept(struct sock *sk, long timeo)
{
	struct sctp_endpoint *ep;
	int err = 0;
	DEFINE_WAIT(wait);

	ep = sctp_sk(sk)->ep;

	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);

		if (list_empty(&ep->asocs)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}

		err = -EINVAL;
		if (!sctp_sstate(sk, LISTENING))
			break;

		err = 0;
		if (!list_empty(&ep->asocs))
			break;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = -EAGAIN;
		if (!timeo)
			break;
	}

	finish_wait(sk_sleep(sk), &wait);

	return err;
}
static void sctp_wait_for_close(struct sock *sk, long timeout)
{
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&sctp_sk(sk)->ep->asocs))
			break;
		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);
	} while (!signal_pending(current) && timeout);

	finish_wait(sk_sleep(sk), &wait);
}
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
	struct sk_buff *frag;

	if (!skb->data_len)
		goto done;

	/* Don't forget the fragments. */
	skb_walk_frags(skb, frag)
		sctp_skb_set_owner_r_frag(frag, sk);

done:
	sctp_skb_set_owner_r(skb, sk);
}
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
		    struct sctp_association *asoc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_sock *newinet;

	newsk->sk_type = sk->sk_type;
	newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	newsk->sk_flags = sk->sk_flags;
	newsk->sk_tsflags = sk->sk_tsflags;
	newsk->sk_no_check_tx = sk->sk_no_check_tx;
	newsk->sk_no_check_rx = sk->sk_no_check_rx;
	newsk->sk_reuse = sk->sk_reuse;

	newsk->sk_shutdown = sk->sk_shutdown;
	newsk->sk_destruct = sctp_destruct_sock;
	newsk->sk_family = sk->sk_family;
	newsk->sk_protocol = IPPROTO_SCTP;
	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	newsk->sk_sndbuf = sk->sk_sndbuf;
	newsk->sk_rcvbuf = sk->sk_rcvbuf;
	newsk->sk_lingertime = sk->sk_lingertime;
	newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
	newsk->sk_sndtimeo = sk->sk_sndtimeo;
	newsk->sk_rxhash = sk->sk_rxhash;

	newinet = inet_sk(newsk);

	/* Initialize sk's sport, dport, rcv_saddr and daddr for
	 * getsockname() and getpeername()
	 */
	newinet->inet_sport = inet->inet_sport;
	newinet->inet_saddr = inet->inet_saddr;
	newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
	newinet->inet_dport = htons(asoc->peer.port);
	newinet->pmtudisc = inet->pmtudisc;
	newinet->inet_id = asoc->next_tsn ^ jiffies;

	newinet->uc_ttl = inet->uc_ttl;
	newinet->mc_loop = 1;
	newinet->mc_ttl = 1;
	newinet->mc_index = 0;
	newinet->mc_list = NULL;

	if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
		net_enable_timestamp();

	security_sk_clone(sk, newsk);
}
static inline void sctp_copy_descendant(struct sock *sk_to,
					const struct sock *sk_from)
{
	int ancestor_size = sizeof(struct inet_sock) +
			    sizeof(struct sctp_sock) -
			    offsetof(struct sctp_sock, auto_asconf_list);

	if (sk_from->sk_family == PF_INET6)
		ancestor_size += sizeof(struct ipv6_pinfo);

	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
}
/* Populate the fields of the newsk from the oldsk and migrate the assoc
 * and its messages to the newsk.
 */
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			      struct sctp_association *assoc,
			      enum sctp_socket_type type)
{
	struct sctp_sock *oldsp = sctp_sk(oldsk);
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sctp_bind_bucket *pp; /* hash list port iterator */
	struct sctp_endpoint *newep = newsp->ep;
	struct sk_buff *skb, *tmp;
	struct sctp_ulpevent *event;
	struct sctp_bind_hashbucket *head;

	/* Migrate socket buffer sizes and all the socket level options to the
	 * new socket.
	 */
	newsk->sk_sndbuf = oldsk->sk_sndbuf;
	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
	/* Brute force copy old sctp opt. */
	sctp_copy_descendant(newsk, oldsk);

	/* Restore the ep value that was overwritten with the above structure
	 * copy.
	 */
	newsp->ep = newep;
	newsp->hmac = NULL;

	/* Hook this new socket in to the bind_hash list. */
	head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
						 inet_sk(oldsk)->inet_num)];
	spin_lock_bh(&head->lock);
	pp = sctp_sk(oldsk)->bind_hash;
	sk_add_bind_node(newsk, &pp->owner);
	sctp_sk(newsk)->bind_hash = pp;
	inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
	spin_unlock_bh(&head->lock);

	/* Copy the bind_addr list from the original endpoint to the new
	 * endpoint so that we can handle restarts properly
	 */
	sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
			   &oldsp->ep->base.bind_addr, GFP_KERNEL);

	/* Move any messages in the old socket's receive queue that are for the
	 * peeled off association to the new socket's receive queue.
	 */
	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
		event = sctp_skb2event(skb);
		if (event->asoc == assoc) {
			__skb_unlink(skb, &oldsk->sk_receive_queue);
			__skb_queue_tail(&newsk->sk_receive_queue, skb);
			sctp_skb_set_owner_r_frag(skb, newsk);
		}
	}

	/* Clean up any messages pending delivery due to partial
	 * delivery.   Three cases:
	 * 1) No partial delivery;  no work.
	 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
	 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
	 */
	skb_queue_head_init(&newsp->pd_lobby);
	atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);

	if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
		struct sk_buff_head *queue;

		/* Decide which queue to move pd_lobby skbs to. */
		if (assoc->ulpq.pd_mode) {
			queue = &newsp->pd_lobby;
		} else
			queue = &newsk->sk_receive_queue;

		/* Walk through the pd_lobby, looking for skbs that
		 * need to be moved to the new socket.
		 */
		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
			event = sctp_skb2event(skb);
			if (event->asoc == assoc) {
				__skb_unlink(skb, &oldsp->pd_lobby);
				__skb_queue_tail(queue, skb);
				sctp_skb_set_owner_r_frag(skb, newsk);
			}
		}

		/* Clear up any skbs waiting for the partial
		 * delivery to finish.
		 */
		if (assoc->ulpq.pd_mode)
			sctp_clear_pd(oldsk, NULL);

	}

	sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);

	/* Set the type of socket to indicate that it is peeled off from the
	 * original UDP-style socket or created with the accept() call on a
	 * TCP-style socket..
	 */
	newsp->type = type;

	/* Mark the new socket "in-use" by the user so that any packets
	 * that may arrive on the association after we've moved it are
	 * queued to the backlog.  This prevents a potential race between
	 * backlog processing on the old socket and new-packet processing
	 * on the new socket.
	 *
	 * The caller has just allocated newsk so we can guarantee that other
	 * paths won't try to lock it and then oldsk.
	 */
	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
	sctp_assoc_migrate(assoc, newsk);
	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);

	/* If the association on the newsk is already closed before accept()
	 * is called, set RCV_SHUTDOWN flag.
	 */
	if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
		inet_sk_set_state(newsk, SCTP_SS_CLOSED);
		newsk->sk_shutdown |= RCV_SHUTDOWN;
	} else {
		inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
	}

	release_sock(newsk);
}
/* This proto struct describes the ULP interface for SCTP.  */
struct proto sctp_prot = {
	.name		= "SCTP",
	.owner		= THIS_MODULE,
	.close		= sctp_close,
	.connect	= sctp_connect,
	.disconnect	= sctp_disconnect,
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
	.destroy	= sctp_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
	.sendmsg	= sctp_sendmsg,
	.recvmsg	= sctp_recvmsg,
	.bind		= sctp_bind,
	.backlog_rcv	= sctp_backlog_rcv,
	.hash		= sctp_hash,
	.unhash		= sctp_unhash,
	.get_port	= sctp_get_port,
	.obj_size	= sizeof(struct sctp_sock),
	.useroffset	= offsetof(struct sctp_sock, subscribe),
	.usersize	= offsetof(struct sctp_sock, initmsg) -
				offsetof(struct sctp_sock, subscribe) +
				sizeof_field(struct sctp_sock, initmsg),
	.sysctl_mem	= sysctl_sctp_mem,
	.sysctl_rmem	= sysctl_sctp_rmem,
	.sysctl_wmem	= sysctl_sctp_wmem,
	.memory_pressure = &sctp_memory_pressure,
	.enter_memory_pressure = sctp_enter_memory_pressure,
	.memory_allocated = &sctp_memory_allocated,
	.sockets_allocated = &sctp_sockets_allocated,
};
#if IS_ENABLED(CONFIG_IPV6)

#include <net/transp_v6.h>
static void sctp_v6_destroy_sock(struct sock *sk)
{
	sctp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

struct proto sctpv6_prot = {
	.name		= "SCTPv6",
	.owner		= THIS_MODULE,
	.close		= sctp_close,
	.connect	= sctp_connect,
	.disconnect	= sctp_disconnect,
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
	.destroy	= sctp_v6_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
	.sendmsg	= sctp_sendmsg,
	.recvmsg	= sctp_recvmsg,
	.bind		= sctp_bind,
	.backlog_rcv	= sctp_backlog_rcv,
	.hash		= sctp_hash,
	.unhash		= sctp_unhash,
	.get_port	= sctp_get_port,
	.obj_size	= sizeof(struct sctp6_sock),
	.useroffset	= offsetof(struct sctp6_sock, sctp.subscribe),
	.usersize	= offsetof(struct sctp6_sock, sctp.initmsg) -
				offsetof(struct sctp6_sock, sctp.subscribe) +
				sizeof_field(struct sctp6_sock, sctp.initmsg),
	.sysctl_mem	= sysctl_sctp_mem,
	.sysctl_rmem	= sysctl_sctp_rmem,
	.sysctl_wmem	= sysctl_sctp_wmem,
	.memory_pressure = &sctp_memory_pressure,
	.enter_memory_pressure = sctp_enter_memory_pressure,
	.memory_allocated = &sctp_memory_allocated,
	.sockets_allocated = &sctp_sockets_allocated,
};

#endif /* IS_ENABLED(CONFIG_IPV6) */