// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * Please send any bug reports or fixes you make to the
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal     <narsi@refcode.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez  <inaky.gonzalez@intel.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
 *    Kevin Gao             <kevin.gao@intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/rhashtable.h>

#include <net/route.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			     struct sctp_association *assoc,
			     enum sctp_socket_type type);
static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}
/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	return asoc->ep->sndbuf_policy ? sk->sk_sndbuf - asoc->sndbuf_used
				       : sk_stream_wspace(sk);
}
/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	if (chunk->shkey)
		sctp_auth_shkey_hold(chunk->shkey);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	skb_shinfo(chunk->skb)->destructor_arg = chunk;

	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
	sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
	sk_mem_charge(sk, chunk->skb->truesize);
}
static void sctp_clear_owner_w(struct sctp_chunk *chunk)
{
	skb_orphan(chunk->skb);
}
static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
				       void (*cb)(struct sctp_chunk *))
{
	struct sctp_outq *q = &asoc->outqueue;
	struct sctp_transport *t;
	struct sctp_chunk *chunk;

	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
			cb(chunk);

	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->sacked, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->out_chunk_list, list)
		cb(chunk);
}
static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
				 void (*cb)(struct sk_buff *, struct sock *))
{
	struct sk_buff *skb, *tmp;

	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
		cb(skb, sk);
}
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}
/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);

		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (id <= SCTP_ALL_ASSOC)
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	if (asoc && (asoc->base.sk != sk || asoc->base.dead))
		asoc = NULL;
	spin_unlock_bh(&sctp_assocs_id_lock);

	return asoc;
}
/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
						     struct sockaddr_storage *addr,
						     sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
	union sctp_addr *laddr = (union sctp_addr *)addr;
	struct sctp_transport *transport;

	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
		return NULL;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}
/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd      - the socket descriptor returned by socket().
 *   addr    - the address structure (struct sockaddr_in or struct
 *             sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
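/* Illustrative userspace sketch of the bind() call documented above (not part
 * of the kernel build; it assumes an IPv4 one-to-many style socket, and port
 * 9999 is an arbitrary example value):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sin_family = AF_INET;
 *	addr.sin_port = htons(9999);
 *	addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 */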
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}

static long sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof(struct sockaddr))
		return NULL;

	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
		return NULL;

	if (addr->sa.sa_family == AF_INET6) {
		if (len < SIN6_LEN_RFC2133)
			return NULL;
		/* V4 mapped address are really of AF_INET family */
		if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
		    !opt->pf->af_supported(AF_INET, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}
/* Bind a local address either to an endpoint or to an association. */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr)))
		return -EADDRINUSE;

	/* Refresh ephemeral port. */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
				 SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}
/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF.  Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}
/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}
/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    sizeof(saveaddr),
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				/* Clear the source and route cache */
				sctp_transport_route(trans, NULL,
						     sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}
/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that has been removed back */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}
/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}

			pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
				 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
				 asoc->asconf_addr_del_pending);

			asoc->src_out_of_asoc_ok = 1;
			stored = 1;
			goto skip_mkasconf;
		}

		if (laddr == NULL)
			return -EINVAL;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

skip_mkasconf:
		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
				    transports) {
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}
/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *		  int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 *	SCTP_BINDX_ADD_ADDR
 *
 *	SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind().  Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with so that no new association accepted will be
 * associated with those addresses. If the endpoint supports dynamic
 * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a
 * endpoint to send the appropriate message to the peer to change the
 * peers address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
 * from userspace.
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx)
 *
 * Returns 0 if ok, <0 errno code on error.
 */
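/* Illustrative userspace sketch of the sctp_bindx() call documented above (not
 * part of the kernel build; it assumes the sctp_bindx() wrapper from
 * lksctp-tools' <netinet/sctp.h>, which tunnels the packed address array
 * through setsockopt() exactly as described here). The address 192.0.2.1 and
 * port 9999 are examples only; the port must match the port the socket is
 * already bound to.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in extra;
 *
 *	memset(&extra, 0, sizeof(extra));
 *	extra.sin_family = AF_INET;
 *	extra.sin_port = htons(9999);
 *	inet_pton(AF_INET, "192.0.2.1", &extra.sin_addr);
 *
 *	if (sctp_bindx(sd, (struct sockaddr *)&extra, 1, SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");
 */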
static int sctp_setsockopt_bindx(struct sock *sk,
				 struct sockaddr __user *addrs,
				 int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
		 __func__, sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	kaddrs = memdup_user(addrs, addrs_size);
	if (unlikely(IS_ERR(kaddrs)))
		return PTR_ERR(kaddrs);

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		/* Allow security module to validate bindx addresses. */
		err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_BINDX_ADD,
						 (struct sockaddr *)kaddrs,
						 addrs_size);
		if (err)
			goto out;
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}
/* __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock *sk,
			  struct sockaddr *kaddrs,
			  int addrs_size, int flags,
			  sctp_assoc_t *assoc_id)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	enum sctp_scope scope;
	long timeo;
	int err = 0;
	int addrcnt = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		struct sctp_af *af;

		if (walk_size + sizeof(sa_family_t) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		port = ntohs(sa_addr->v4.sin_port);

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port <
				    inet_prot_sock(net) &&
				    !ns_capable(net->user_ns,
						CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}

			err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
							       GFP_KERNEL);
			if (err < 0)
				goto out_free;
		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* In case the user of sctp_connectx() wants an association
	 * id back, assign one now.
	 */
	if (assoc_id) {
		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
		if (err < 0)
			goto out_free;
	}

	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
	if (err < 0)
		goto out_free;

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
	sp->pf->to_sk_daddr(sa_addr, sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	if (assoc_id)
		*assoc_id = asoc->assoc_id;

	err = sctp_wait_for_connect(asoc, &timeo);
	/* Note: the asoc may be freed after the return of
	 * sctp_wait_for_connect.
	 */

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * To the hash table, try to unhash it, just in case, its a noop
		 * if it wasn't hashed so we're safe
		 */
		sctp_association_free(asoc);
	}
	return err;
}
/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *		     sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6) the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking either sctp_connectx(). This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
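/* Illustrative userspace sketch of the sctp_connectx() call documented above
 * (not part of the kernel build; it assumes the sctp_connectx() wrapper from
 * lksctp-tools' <netinet/sctp.h>, which tunnels the packed address array
 * through setsockopt() as described here). Both peer addresses and the port
 * are examples only.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	struct sockaddr_in peers[2];
 *	sctp_assoc_t aid;
 *
 *	memset(peers, 0, sizeof(peers));
 *	peers[0].sin_family = peers[1].sin_family = AF_INET;
 *	peers[0].sin_port = peers[1].sin_port = htons(5000);
 *	inet_pton(AF_INET, "192.0.2.1", &peers[0].sin_addr);
 *	inet_pton(AF_INET, "198.51.100.1", &peers[1].sin_addr);
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &aid) < 0)
 *		perror("sctp_connectx");
 */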
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	struct sockaddr *kaddrs;
	int err = 0, flags = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	kaddrs = memdup_user(addrs, addrs_size);
	if (unlikely(IS_ERR(kaddrs)))
		return PTR_ERR(kaddrs);

	/* Allow security module to validate connectx addresses. */
	err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX,
					 (struct sockaddr *)kaddrs,
					 addrs_size);
	if (err)
		goto out_free;

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		flags = sk->sk_socket->file->f_flags;

	err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);

out_free:
	kfree(kaddrs);

	return err;
}
/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}
/*
 * New interface for the API.  The since the API is done with a socket
 * option, to make it simple we feed back the association id is as a return
 * indication to the call.  Error is always negative and association id is
 * always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}
/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that use-space library
 * can avoid any unnecessary allocations. The only different part
 * is that we store the actual length of the address buffer into the
 * addrs_num structure member. That way we can re-use the existing
 * code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}
/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 *  };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
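/* Illustrative userspace sketch of the SO_LINGER behaviour described above
 * (not part of the kernel build): on a TCP-style SCTP socket, l_onoff=1 with
 * l_linger=0 turns the subsequent close() into the ABORT primitive instead of
 * a graceful SHUTDOWN. The descriptor sd is assumed to be a connected
 * TCP-style SCTP socket.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	struct linger lng = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(sd, SOL_SOCKET, SO_LINGER, &lng, sizeof(lng));
 *	close(sd);
 */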
static void sctp_close(struct sock *sk, long timeout)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;
	unsigned int data_was_unread;

	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	sk->sk_shutdown = SHUTDOWN_MASK;
	inet_sk_set_state(sk, SCTP_SS_CLOSING);

	ep = sctp_sk(sk)->ep;

	/* Clean up any skbs sitting on the receive queue.  */
	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_association_free(asoc);
				continue;
			}
		}

		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
		    !skb_queue_empty(&asoc->ulpq.reasm) ||
		    !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
	 * held and that should be grabbed before socket lock.
	 */
	spin_lock_bh(&net->sctp.addr_wq_lock);
	bh_lock_sock_nested(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	bh_unlock_sock(sk);
	spin_unlock_bh(&net->sctp.addr_wq_lock);

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}
/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}
/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */
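/* Illustrative userspace sketch of sending one user message as described
 * above (not part of the kernel build; it uses the sctp_sendmsg() wrapper from
 * lksctp-tools' <netinet/sctp.h>, which builds the sendmsg() ancillary data
 * for you - the trailing arguments are ppid, flags, stream number,
 * time-to-live and context). The descriptor sd and the filled-in peer
 * sockaddr are assumed to exist already.
 *
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	const char *buf = "hello";
 *
 *	sctp_sendmsg(sd, buf, strlen(buf),
 *		     (struct sockaddr *)&peer, sizeof(peer),
 *		     0, 0, 1, 0, 0);
 */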
static int sctp_msghdr_parse(const struct msghdr *msg,
			     struct sctp_cmsgs *cmsgs);
static int sctp_sendmsg_parse(struct sock *sk, struct sctp_cmsgs *cmsgs,
			      struct sctp_sndrcvinfo *srinfo,
			      const struct msghdr *msg, size_t msg_len)
{
	__u16 sflags;
	int err;

	if (sctp_sstate(sk, LISTENING) && sctp_style(sk, TCP))
		return -EPIPE;

	if (msg_len > sk->sk_sndbuf)
		return -EMSGSIZE;

	memset(cmsgs, 0, sizeof(*cmsgs));
	err = sctp_msghdr_parse(msg, cmsgs);
	if (err) {
		pr_debug("%s: msghdr parse err:%x\n", __func__, err);
		return err;
	}

	memset(srinfo, 0, sizeof(*srinfo));
	if (cmsgs->srinfo) {
		srinfo->sinfo_stream = cmsgs->srinfo->sinfo_stream;
		srinfo->sinfo_flags = cmsgs->srinfo->sinfo_flags;
		srinfo->sinfo_ppid = cmsgs->srinfo->sinfo_ppid;
		srinfo->sinfo_context = cmsgs->srinfo->sinfo_context;
		srinfo->sinfo_assoc_id = cmsgs->srinfo->sinfo_assoc_id;
		srinfo->sinfo_timetolive = cmsgs->srinfo->sinfo_timetolive;
	}

	if (cmsgs->sinfo) {
		srinfo->sinfo_stream = cmsgs->sinfo->snd_sid;
		srinfo->sinfo_flags = cmsgs->sinfo->snd_flags;
		srinfo->sinfo_ppid = cmsgs->sinfo->snd_ppid;
		srinfo->sinfo_context = cmsgs->sinfo->snd_context;
		srinfo->sinfo_assoc_id = cmsgs->sinfo->snd_assoc_id;
	}

	if (cmsgs->prinfo) {
		srinfo->sinfo_timetolive = cmsgs->prinfo->pr_value;
		SCTP_PR_SET_POLICY(srinfo->sinfo_flags,
				   cmsgs->prinfo->pr_policy);
	}

	sflags = srinfo->sinfo_flags;
	if (!sflags && msg_len)
		return 0;

	if (sctp_style(sk, TCP) && (sflags & (SCTP_EOF | SCTP_ABORT)))
		return -EINVAL;

	if (((sflags & SCTP_EOF) && msg_len > 0) ||
	    (!(sflags & (SCTP_EOF | SCTP_ABORT)) && msg_len == 0))
		return -EINVAL;

	if ((sflags & SCTP_ADDR_OVER) && !msg->msg_name)
		return -EINVAL;

	return 0;
}
static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
				 struct sctp_cmsgs *cmsgs,
				 union sctp_addr *daddr,
				 struct sctp_transport **tp)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct net *net = sock_net(sk);
	struct sctp_association *asoc;
	enum sctp_scope scope;
	struct cmsghdr *cmsg;
	__be32 flowinfo = 0;
	struct sctp_af *af;
	int err;

	if (sflags & (SCTP_EOF | SCTP_ABORT))
		return -EINVAL;

	if (sctp_style(sk, TCP) && (sctp_sstate(sk, ESTABLISHED) ||
				    sctp_sstate(sk, CLOSING)))
		return -EADDRNOTAVAIL;

	if (sctp_endpoint_is_peeled_off(ep, daddr))
		return -EADDRNOTAVAIL;

	if (!ep->base.bind_addr.port) {
		if (sctp_autobind(sk))
			return -EAGAIN;
	} else {
		if (ep->base.bind_addr.port < inet_prot_sock(net) &&
		    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
			return -EACCES;
	}

	scope = sctp_scope(daddr);

	/* Label connection socket for first association 1-to-many
	 * style for client sequence socket()->sendmsg(). This
	 * needs to be done before sctp_assoc_add_peer() as that will
	 * set up the initial packet that needs to account for any
	 * security ip options (CIPSO/CALIPSO) added to the packet.
	 */
	af = sctp_get_af_specific(daddr->sa.sa_family);
	if (!af)
		return -EINVAL;
	err = security_sctp_bind_connect(sk, SCTP_SENDMSG_CONNECT,
					 (struct sockaddr *)daddr,
					 af->sockaddr_len);
	if (err < 0)
		return err;

	asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
	if (!asoc)
		return -ENOMEM;

	if (sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL) < 0) {
		err = -ENOMEM;
		goto free;
	}

	if (cmsgs->init) {
		struct sctp_initmsg *init = cmsgs->init;

		if (init->sinit_num_ostreams) {
			__u16 outcnt = init->sinit_num_ostreams;

			asoc->c.sinit_num_ostreams = outcnt;
			/* outcnt has been changed, need to re-init stream */
			err = sctp_stream_init(&asoc->stream, outcnt, 0,
					       GFP_KERNEL);
			if (err)
				goto free;
		}

		if (init->sinit_max_instreams)
			asoc->c.sinit_max_instreams = init->sinit_max_instreams;

		if (init->sinit_max_attempts)
			asoc->max_init_attempts = init->sinit_max_attempts;

		if (init->sinit_max_init_timeo)
			asoc->max_init_timeo =
				msecs_to_jiffies(init->sinit_max_init_timeo);
	}

	*tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN);
	if (!*tp) {
		err = -ENOMEM;
		goto free;
	}

	if (!cmsgs->addrs_msg)
		return 0;

	if (daddr->sa.sa_family == AF_INET6)
		flowinfo = daddr->v6.sin6_flowinfo;

	/* sendv addr list parse */
	for_each_cmsghdr(cmsg, cmsgs->addrs_msg) {
		struct sctp_transport *transport;
		struct sctp_association *old;
		union sctp_addr _daddr;
		int dlen;

		if (cmsg->cmsg_level != IPPROTO_SCTP ||
		    (cmsg->cmsg_type != SCTP_DSTADDRV4 &&
		     cmsg->cmsg_type != SCTP_DSTADDRV6))
			continue;

		daddr = &_daddr;
		memset(daddr, 0, sizeof(*daddr));
		dlen = cmsg->cmsg_len - sizeof(struct cmsghdr);
		if (cmsg->cmsg_type == SCTP_DSTADDRV4) {
			if (dlen < sizeof(struct in_addr)) {
				err = -EINVAL;
				goto free;
			}

			dlen = sizeof(struct in_addr);
			daddr->v4.sin_family = AF_INET;
			daddr->v4.sin_port = htons(asoc->peer.port);
			memcpy(&daddr->v4.sin_addr, CMSG_DATA(cmsg), dlen);
		} else {
			if (dlen < sizeof(struct in6_addr)) {
				err = -EINVAL;
				goto free;
			}

			dlen = sizeof(struct in6_addr);
			daddr->v6.sin6_flowinfo = flowinfo;
			daddr->v6.sin6_family = AF_INET6;
			daddr->v6.sin6_port = htons(asoc->peer.port);
			memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen);
		}

		err = sctp_verify_addr(sk, daddr, sizeof(*daddr));
		if (err)
			goto free;

		old = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
		if (old && old != asoc) {
			if (old->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto free;
		}

		if (sctp_endpoint_is_peeled_off(ep, daddr)) {
			err = -EADDRNOTAVAIL;
			goto free;
		}

		transport = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto free;
		}
	}

	return 0;

free:
	sctp_association_free(asoc);
	return err;
}
static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,
				     __u16 sflags, struct msghdr *msg,
				     size_t msg_len)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP))
		return -EPIPE;

	if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP) &&
	    !sctp_state(asoc, ESTABLISHED))
		return 0;

	if (sflags & SCTP_EOF) {
		pr_debug("%s: shutting down association:%p\n", __func__, asoc);
		sctp_primitive_SHUTDOWN(net, asoc, NULL);

		return 0;
	}

	if (sflags & SCTP_ABORT) {
		struct sctp_chunk *chunk;

		chunk = sctp_make_abort_user(asoc, msg, msg_len);
		if (!chunk)
			return -ENOMEM;

		pr_debug("%s: aborting association:%p\n", __func__, asoc);
		sctp_primitive_ABORT(net, asoc, chunk);
		iov_iter_revert(&msg->msg_iter, msg_len);

		return 0;
	}

	return 1;
}
static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
				struct msghdr *msg, size_t msg_len,
				struct sctp_transport *transport,
				struct sctp_sndrcvinfo *sinfo)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct net *net = sock_net(sk);
	struct sctp_datamsg *datamsg;
	bool wait_connect = false;
	struct sctp_chunk *chunk;
	long timeo;
	int err;

	if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
		err = -EINVAL;
		goto err;
	}

	if (unlikely(!SCTP_SO(&asoc->stream, sinfo->sinfo_stream)->ext)) {
		err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream);
		if (err)
			goto err;
	}

	if (sp->disable_fragments && msg_len > asoc->frag_point) {
		err = -EMSGSIZE;
		goto err;
	}

	if (asoc->pmtu_pending) {
		if (sp->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
		asoc->pmtu_pending = 0;
	}

	if (sctp_wspace(asoc) < (int)msg_len)
		sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));

	if (sk_under_memory_pressure(sk))
		sk_mem_reclaim(sk);

	if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
		timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
		if (err)
			goto err;
	}

	if (sctp_state(asoc, CLOSED)) {
		err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
		if (err)
			goto err;

		if (asoc->ep->intl_enable) {
			timeo = sock_sndtimeo(sk, 0);
			err = sctp_wait_for_connect(asoc, &timeo);
			if (err) {
				err = -ESRCH;
				goto err;
			}
		} else {
			wait_connect = true;
		}

		pr_debug("%s: we associated primitively\n", __func__);
	}

	datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
	if (IS_ERR(datamsg)) {
		err = PTR_ERR(datamsg);
		goto err;
	}

	asoc->force_delay = !!(msg->msg_flags & MSG_MORE);

	list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
		sctp_chunk_hold(chunk);
		sctp_set_owner_w(chunk);
		chunk->transport = transport;
	}

	err = sctp_primitive_SEND(net, asoc, datamsg);
	if (err) {
		sctp_datamsg_free(datamsg);
		goto err;
	}

	pr_debug("%s: we sent primitively\n", __func__);

	sctp_datamsg_put(datamsg);

	if (unlikely(wait_connect)) {
		timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
		sctp_wait_for_connect(asoc, &timeo);
	}

	err = msg_len;

err:
	return err;
}
static union sctp_addr *sctp_sendmsg_get_daddr(struct sock *sk,
					       const struct msghdr *msg,
					       struct sctp_cmsgs *cmsgs)
{
	union sctp_addr *daddr = NULL;
	int err;

	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int len = msg->msg_namelen;

		if (len > sizeof(*daddr))
			len = sizeof(*daddr);

		daddr = (union sctp_addr *)msg->msg_name;

		err = sctp_verify_addr(sk, daddr, len);
		if (err)
			return ERR_PTR(err);
	}

	return daddr;
}
static void sctp_sendmsg_update_sinfo(struct sctp_association *asoc,
				      struct sctp_sndrcvinfo *sinfo,
				      struct sctp_cmsgs *cmsgs)
{
	if (!cmsgs->srinfo && !cmsgs->sinfo) {
		sinfo->sinfo_stream = asoc->default_stream;
		sinfo->sinfo_ppid = asoc->default_ppid;
		sinfo->sinfo_context = asoc->default_context;
		sinfo->sinfo_assoc_id = sctp_assoc2id(asoc);

		if (!cmsgs->prinfo)
			sinfo->sinfo_flags = asoc->default_flags;
	}

	if (!cmsgs->srinfo && !cmsgs->prinfo)
		sinfo->sinfo_timetolive = asoc->default_timetolive;

	if (cmsgs->authinfo) {
		/* Reuse sinfo_tsn to indicate that authinfo was set and
		 * sinfo_ssn to save the keyid on tx path.
		 */
		sinfo->sinfo_tsn = 1;
		sinfo->sinfo_ssn = cmsgs->authinfo->auth_keynumber;
	}
}
static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_transport *transport = NULL;
	struct sctp_sndrcvinfo _sinfo, *sinfo;
	struct sctp_association *asoc, *tmp;
	struct sctp_cmsgs cmsgs;
	union sctp_addr *daddr;
	bool new = false;
	__u16 sflags;
	int err;

	/* Parse and get snd_info */
	err = sctp_sendmsg_parse(sk, &cmsgs, &_sinfo, msg, msg_len);
	if (err)
		goto out;

	sinfo  = &_sinfo;
	sflags = sinfo->sinfo_flags;

	/* Get daddr from msg */
	daddr = sctp_sendmsg_get_daddr(sk, msg, &cmsgs);
	if (IS_ERR(daddr)) {
		err = PTR_ERR(daddr);
		goto out;
	}

	lock_sock(sk);

	/* SCTP_SENDALL process */
	if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
		list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
			err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
							msg_len);
			if (err == 0)
				continue;
			if (err < 0)
				goto out_unlock;

			sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);

			err = sctp_sendmsg_to_asoc(asoc, msg, msg_len,
						   NULL, sinfo);
			if (err < 0)
				goto out_unlock;

			iov_iter_revert(&msg->msg_iter, err);
		}

		goto out_unlock;
	}

	/* Get and check or create asoc */
	if (daddr) {
		asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
		if (asoc) {
			err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
							msg_len);
			if (err <= 0)
				goto out_unlock;
		} else {
			err = sctp_sendmsg_new_asoc(sk, sflags, &cmsgs, daddr,
						    &transport);
			if (err)
				goto out_unlock;

			asoc = transport->asoc;
			new = true;
		}

		if (!sctp_style(sk, TCP) && !(sflags & SCTP_ADDR_OVER))
			transport = NULL;
	} else {
		asoc = sctp_id2assoc(sk, sinfo->sinfo_assoc_id);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}

		err = sctp_sendmsg_check_sflags(asoc, sflags, msg, msg_len);
		if (err <= 0)
			goto out_unlock;
	}

	/* Update snd_info with the asoc */
	sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);

	/* Send msg to the asoc */
	err = sctp_sendmsg_to_asoc(asoc, msg, msg_len, transport, sinfo);
	if (err < 0 && err != -ESRCH && new)
		sctp_association_free(asoc);

out_unlock:
	release_sock(sk);
out:
	return sctp_error(sk, msg->msg_flags, err);
}
/* This is an extended version of skb_pull() that removes the data from the
 * start of a skb even when data is spread across the list of skb's in the
 * frag_list. len specifies the total amount of data that needs to be removed.
 * when 'len' bytes could be removed from the skb, it returns 0.
 * If 'len' exceeds the total skb length, it returns the no. of bytes that
 * could not be removed.
 */
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
	struct sk_buff *list;
	int skb_len = skb_headlen(skb);
	int rlen;

	if (len <= skb_len) {
		__skb_pull(skb, len);
		return 0;
	}
	len -= skb_len;
	__skb_pull(skb, skb_len);

	skb_walk_frags(skb, list) {
		rlen = sctp_skb_pull(list, len);
		skb->len -= (len-rlen);
		skb->data_len -= (len-rlen);

		if (!rlen)
			return 0;

		len = rlen;
	}

	return len;
}
/* API 3.1.3  recvmsg() - UDP Style Syntax
 *
 *  ssize_t recvmsg(int socket, struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 */
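/* Illustrative userspace sketch of receiving a message as described above
 * (not part of the kernel build; it uses the sctp_recvmsg() wrapper from
 * lksctp-tools' <netinet/sctp.h>, which unpacks the SCTP_SNDRCV ancillary data
 * into a struct sctp_sndrcvinfo). The descriptor sd is assumed to be an open
 * SCTP socket.
 *
 *	#include <stdio.h>
 *	#include <netinet/sctp.h>
 *
 *	char buf[8192];
 *	struct sctp_sndrcvinfo sinfo;
 *	int msg_flags = 0;
 *	int n;
 *
 *	n = sctp_recvmsg(sd, buf, sizeof(buf), NULL, 0, &sinfo, &msg_flags);
 *	if (n > 0 && (msg_flags & MSG_NOTIFICATION))
 *		printf("notification, not user data\n");
 */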
static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			int noblock, int flags, int *addr_len)
{
	struct sctp_ulpevent *event = NULL;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff *skb, *head_skb;
	int copied;
	int err = 0;
	int skb_len;

	pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
		 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
		 addr_len);

	lock_sock(sk);

	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
	    !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
		err = -ENOTCONN;
		goto out;
	}

	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Get the total length of the skb including any skb's in the
	 * frag_list.
	 */
	skb_len = skb->len;

	copied = skb_len;
	if (copied > len)
		copied = len;

	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	event = sctp_skb2event(skb);

	if (err)
		goto out_free;

	if (event->chunk && event->chunk->head_skb)
		head_skb = event->chunk->head_skb;
	else
		head_skb = skb;
	sock_recv_ts_and_drops(msg, sk, head_skb);
	if (sctp_ulpevent_is_notification(event)) {
		msg->msg_flags |= MSG_NOTIFICATION;
		sp->pf->event_msgname(event, msg->msg_name, addr_len);
	} else {
		sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
	}

	/* Check if we allow SCTP_NXTINFO. */
	if (sp->recvnxtinfo)
		sctp_ulpevent_read_nxtinfo(event, msg, sk);
	/* Check if we allow SCTP_RCVINFO. */
	if (sp->recvrcvinfo)
		sctp_ulpevent_read_rcvinfo(event, msg);
	/* Check if we allow SCTP_SNDRCVINFO. */
	if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_DATA_IO_EVENT))
		sctp_ulpevent_read_sndrcvinfo(event, msg);

	err = copied;

	/* If skb's length exceeds the user's buffer, update the skb and
	 * push it back to the receive_queue so that the next call to
	 * recvmsg() will return the remaining data. Don't set MSG_EOR.
	 */
	if (skb_len > copied) {
		msg->msg_flags &= ~MSG_EOR;
		if (flags & MSG_PEEK)
			goto out_free;
		sctp_skb_pull(skb, copied);
		skb_queue_head(&sk->sk_receive_queue, skb);

		/* When only partial message is copied to the user, increase
		 * rwnd by that amount. If all the data in the skb is read,
		 * rwnd is updated when the event is freed.
		 */
		if (!sctp_ulpevent_is_notification(event))
			sctp_assoc_rwnd_increase(event->asoc, copied);
		goto out;
	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
		   (event->msg_flags & MSG_EOR))
		msg->msg_flags |= MSG_EOR;
	else
		msg->msg_flags &= ~MSG_EOR;

out_free:
	if (flags & MSG_PEEK) {
		/* Release the skb reference acquired after peeking the skb in
		 * sctp_skb_recv_datagram().
		 */
		kfree_skb(skb);
	} else {
		/* Free the event which includes releasing the reference to
		 * the owner of the skb, freeing the skb and updating the
		 * rwnd.
		 */
		sctp_ulpevent_free(event);
	}
out:
	release_sock(sk);
	return err;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag.  If enabled, no SCTP message
 * fragmentation will be performed.  Instead, if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * an error will be indicated to the user.
 */
static int sctp_setsockopt_disable_fragments(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;

	return 0;
}
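
/* Example (user space, not part of this kernel build): a minimal sketch of
 * how an application might turn this option on.  The one-to-many socket type
 * and the bare error handling are illustrative assumptions; once enabled,
 * oversized sends fail with an error instead of being fragmented.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	int on = 1;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *		       &on, sizeof(on)) < 0)
 *		perror("SCTP_DISABLE_FRAGMENTS");
 */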
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
				  unsigned int optlen)
{
	struct sctp_event_subscribe subscribe;
	__u8 *sn_type = (__u8 *)&subscribe;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	int i;

	if (optlen > sizeof(struct sctp_event_subscribe))
		return -EINVAL;

	if (copy_from_user(&subscribe, optval, optlen))
		return -EFAULT;

	for (i = 0; i < optlen; i++)
		sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i,
				       sn_type[i]);

	list_for_each_entry(asoc, &sp->ep->asocs, asocs)
		asoc->subscribe = sctp_sk(sk)->subscribe;

	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
	 * if there is no data to be sent or retransmitted, the stack will
	 * immediately send up this notification.
	 */
	if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_SENDER_DRY_EVENT)) {
		struct sctp_ulpevent *event;

		asoc = sctp_id2assoc(sk, 0);
		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
			event = sctp_ulpevent_make_sender_dry_event(asoc,
					GFP_USER | __GFP_NOWARN);
			if (!event)
				return -ENOMEM;

			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
		}
	}

	return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set, it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any association should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
				     unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct net *net = sock_net(sk);

	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (optlen != sizeof(int))
		return -EINVAL;

	if (copy_from_user(&sp->autoclose, optval, optlen))
		return -EFAULT;

	if (sp->autoclose > net->sctp.max_autoclose)
		sp->autoclose = net->sctp.max_autoclose;

	return 0;
}
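
/* Example (user space): close idle one-to-many associations after 60 seconds
 * without user data.  The value 60 and the pre-existing socket descriptor fd
 * are illustrative assumptions; a value of 0 disables automatic close.
 *
 *	#include <netinet/sctp.h>
 *
 *	int idle_secs = 60;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
 *		       &idle_secs, sizeof(idle_secs)) < 0)
 *		perror("SCTP_AUTOCLOSE");
 */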
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 *     uint32_t                spp_ipv6_flowlabel;
 *     uint8_t                 spp_dscp;
 *  };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in by the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for.  This value will apply to all addresses of an
 *                     association if the spp_address field is empty.  Note
 *                     also that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association.  The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE  - Enable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled.  Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive; only one of these two should
 *                     be specified.  Enabling both will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *                     heartbeat delay is to be set to the value of 0
 *                     milliseconds.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.  Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive.  Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack.  The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address.  Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack.  If the spp_address field is blank then
 *                     delayed sack is disabled for the entire association.  Note
 *                     also that this flag is mutually exclusive to
 *                     SPP_SACKDELAY_ENABLE; setting both will have undefined
 *                     results.
 *
 *                     SPP_IPV6_FLOWLABEL:  Setting this flag enables the
 *                     setting of the IPV6 flow label value.  The value is
 *                     contained in the spp_ipv6_flowlabel field.
 *                     Upon retrieval, this flag will be set to indicate that
 *                     the spp_ipv6_flowlabel field has a valid value returned.
 *                     If a specific destination address is set (in the
 *                     spp_address field), then the value returned is that of
 *                     the address.  If just an association is specified (and
 *                     no address), then the association's default flow label
 *                     is returned.  If neither an association nor a destination
 *                     is specified, then the socket's default flow label is
 *                     returned.  For non-IPv6 sockets, this flag will be left
 *                     cleared.
 *
 *                     SPP_DSCP:  Setting this flag enables the setting of the
 *                     Differentiated Services Code Point (DSCP) value
 *                     associated with either the association or a specific
 *                     address.  The value is obtained in the spp_dscp field.
 *                     Upon retrieval, this flag will be set to indicate that
 *                     the spp_dscp field has a valid value returned.  If a
 *                     specific destination address is set when called (in the
 *                     spp_address field), then that specific destination
 *                     address's DSCP value is returned.  If just an association
 *                     is specified, then the association's default DSCP is
 *                     returned.  If neither an association nor a destination is
 *                     specified, then the socket's default DSCP is returned.
 *
 *   spp_ipv6_flowlabel
 *                   - This field is used in conjunction with the
 *                     SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
 *                     The 20 least significant bits are used for the flow
 *                     label.  This setting has precedence over any IPv6-layer
 *                     setting.
 *
 *   spp_dscp        - This field is used in conjunction with the SPP_DSCP flag
 *                     and contains the DSCP.  The 6 most significant bits are
 *                     used for the DSCP.  This setting has precedence over any
 *                     IPv4- or IPv6-layer setting.
 */
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
				       struct sctp_transport *trans,
				       struct sctp_association *asoc,
				       struct sctp_sock *sp,
				       int hb_change,
				       int pmtud_change,
				       int sackdelay_change)
{
	int error;

	if (params->spp_flags & SPP_HB_DEMAND && trans) {
		struct net *net = sock_net(trans->asoc->base.sk);

		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
		if (error)
			return error;
	}

	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
	 * this field is ignored.  Note also that a value of zero indicates
	 * the current setting should be left unchanged.
	 */
	if (params->spp_flags & SPP_HB_ENABLE) {

		/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
		 * set.  This lets us use 0 value when this flag
		 * is set.
		 */
		if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
			params->spp_hbinterval = 0;

		if (params->spp_hbinterval ||
		    (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
			if (trans) {
				trans->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else if (asoc) {
				asoc->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else {
				sp->hbinterval = params->spp_hbinterval;
			}
		}
	}

	if (hb_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_HB) | hb_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_HB) | hb_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_HB) | hb_change;
		}
	}

	/* When Path MTU discovery is disabled the value specified here will
	 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
	 * include the flag SPP_PMTUD_DISABLE for this field to have any
	 * effect).
	 */
	if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
		if (trans) {
			trans->pathmtu = params->spp_pathmtu;
			sctp_assoc_sync_pmtu(asoc);
		} else if (asoc) {
			sctp_assoc_set_pmtu(asoc, params->spp_pathmtu);
		} else {
			sp->pathmtu = params->spp_pathmtu;
		}
	}

	if (pmtud_change) {
		if (trans) {
			int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
				(params->spp_flags & SPP_PMTUD_ENABLE);
			trans->param_flags =
				(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
			if (update) {
				sctp_transport_pmtu(trans, sctp_opt2sk(sp));
				sctp_assoc_sync_pmtu(asoc);
			}
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
		}
	}

	/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
	 * value of this field is ignored.  Note also that a value of zero
	 * indicates the current setting should be left unchanged.
	 */
	if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
		if (trans) {
			trans->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else if (asoc) {
			asoc->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else {
			sp->sackdelay = params->spp_sackdelay;
		}
	}

	if (sackdelay_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		}
	}

	/* Note that a value of zero indicates the current setting should be
	 * left unchanged.
	 */
	if (params->spp_pathmaxrxt) {
		if (trans) {
			trans->pathmaxrxt = params->spp_pathmaxrxt;
		} else if (asoc) {
			asoc->pathmaxrxt = params->spp_pathmaxrxt;
		} else {
			sp->pathmaxrxt = params->spp_pathmaxrxt;
		}
	}

	if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
		if (trans) {
			if (trans->ipaddr.sa.sa_family == AF_INET6) {
				trans->flowlabel = params->spp_ipv6_flowlabel &
						   SCTP_FLOWLABEL_VAL_MASK;
				trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
			}
		} else if (asoc) {
			struct sctp_transport *t;

			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				if (t->ipaddr.sa.sa_family != AF_INET6)
					continue;
				t->flowlabel = params->spp_ipv6_flowlabel &
					       SCTP_FLOWLABEL_VAL_MASK;
				t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
			}
			asoc->flowlabel = params->spp_ipv6_flowlabel &
					  SCTP_FLOWLABEL_VAL_MASK;
			asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
		} else if (sctp_opt2sk(sp)->sk_family == AF_INET6) {
			sp->flowlabel = params->spp_ipv6_flowlabel &
					SCTP_FLOWLABEL_VAL_MASK;
			sp->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
		}
	}

	if (params->spp_flags & SPP_DSCP) {
		if (trans) {
			trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
			trans->dscp |= SCTP_DSCP_SET_MASK;
		} else if (asoc) {
			struct sctp_transport *t;

			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->dscp = params->spp_dscp &
					  SCTP_DSCP_VAL_MASK;
				t->dscp |= SCTP_DSCP_SET_MASK;
			}
			asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
			asoc->dscp |= SCTP_DSCP_SET_MASK;
		} else {
			sp->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
			sp->dscp |= SCTP_DSCP_SET_MASK;
		}
	}

	return 0;
}
static int sctp_setsockopt_peer_addr_params(struct sock *sk,
					    char __user *optval,
					    unsigned int optlen)
{
	struct sctp_paddrparams params;
	struct sctp_transport *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock *sp = sctp_sk(sk);
	int error;
	int hb_change, pmtud_change, sackdelay_change;

	if (optlen == sizeof(params)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;
	} else if (optlen == ALIGN(offsetof(struct sctp_paddrparams,
					    spp_ipv6_flowlabel), 4)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;
		if (params.spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	/* Validate flags and value parameters. */
	hb_change        = params.spp_flags & SPP_HB;
	pmtud_change     = params.spp_flags & SPP_PMTUD;
	sackdelay_change = params.spp_flags & SPP_SACKDELAY;

	if (hb_change        == SPP_HB ||
	    pmtud_change     == SPP_PMTUD ||
	    sackdelay_change == SPP_SACKDELAY ||
	    params.spp_sackdelay > 500 ||
	    (params.spp_pathmtu &&
	     params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
		return -EINVAL;

	/* If an address other than INADDR_ANY is specified, and
	 * no transport is found, then the request is invalid.
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans)
			return -EINVAL;
	}

	/* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
	 * socket is a one to many style socket, and an association
	 * was not found, then the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	/* Heartbeat demand can only be sent on a transport or
	 * association, but not a socket.
	 */
	if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
		return -EINVAL;

	/* Process parameters. */
	error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
					    hb_change, pmtud_change,
					    sackdelay_change);
	if (error)
		return error;

	/* If changes are for association, also apply parameters to each
	 * transport.
	 */
	if (!trans && asoc) {
		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
				    transports) {
			sctp_apply_peer_addr_params(&params, trans, asoc, sp,
						    hb_change, pmtud_change,
						    sackdelay_change);
		}
	}

	return 0;
}
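
/* Example (user space): enable heartbeats with a 5000 ms interval on every
 * peer address of an existing association.  fd, the assoc_id value and the
 * empty spp_address (meaning "all addresses") are assumptions made for the
 * sake of the sketch.
 *
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_paddrparams pp;
 *
 *	memset(&pp, 0, sizeof(pp));
 *	pp.spp_assoc_id = assoc_id;	// e.g. from an SCTP_ASSOC_CHANGE event
 *	pp.spp_hbinterval = 5000;	// milliseconds
 *	pp.spp_flags = SPP_HB_ENABLE;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		       &pp, sizeof(pp)) < 0)
 *		perror("SCTP_PEER_ADDR_PARAMS");
 */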
static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
}

static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
}
static void sctp_apply_asoc_delayed_ack(struct sctp_sack_info *params,
					struct sctp_association *asoc)
{
	struct sctp_transport *trans;

	if (params->sack_delay) {
		asoc->sackdelay = msecs_to_jiffies(params->sack_delay);
		asoc->param_flags =
			sctp_spp_sackdelay_enable(asoc->param_flags);
	}
	if (params->sack_freq == 1) {
		asoc->param_flags =
			sctp_spp_sackdelay_disable(asoc->param_flags);
	} else if (params->sack_freq > 1) {
		asoc->sackfreq = params->sack_freq;
		asoc->param_flags =
			sctp_spp_sackdelay_enable(asoc->param_flags);
	}

	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		if (params->sack_delay) {
			trans->sackdelay = msecs_to_jiffies(params->sack_delay);
			trans->param_flags =
				sctp_spp_sackdelay_enable(trans->param_flags);
		}
		if (params->sack_freq == 1) {
			trans->param_flags =
				sctp_spp_sackdelay_disable(trans->param_flags);
		} else if (params->sack_freq > 1) {
			trans->sackfreq = params->sack_freq;
			trans->param_flags =
				sctp_spp_sackdelay_enable(trans->param_flags);
		}
	}
}
/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will affect the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.  If
 * the assoc_id is 0, then this sets or gets the endpoint's default
 * values.  If the assoc_id field is non-zero, then the set or get
 * affects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t            sack_assoc_id;
 *     uint32_t                sack_delay;
 *     uint32_t                sack_freq;
 * };
 *
 * sack_assoc_id -  This parameter indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoint's default value is changed (affecting future
 *    associations only).
 *
 * sack_delay -  This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq -  This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2; setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
static int sctp_setsockopt_delayed_ack(struct sock *sk,
				       char __user *optval, unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sack_info params;

	if (optlen == sizeof(struct sctp_sack_info)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;

		if (params.sack_delay == 0 && params.sack_freq == 0)
			return 0;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
				    "Use struct sctp_sack_info instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;

		if (params.sack_delay == 0)
			params.sack_freq = 1;
		else
			params.sack_freq = 0;
	} else {
		return -EINVAL;
	}

	/* Validate value parameter. */
	if (params.sack_delay > 500)
		return -EINVAL;

	/* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
	 * socket is a one to many style socket, and an association
	 * was not found, then the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
	if (!asoc && params.sack_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		sctp_apply_asoc_delayed_ack(&params, asoc);

		return 0;
	}

	if (sctp_style(sk, TCP))
		params.sack_assoc_id = SCTP_FUTURE_ASSOC;

	if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
	    params.sack_assoc_id == SCTP_ALL_ASSOC) {
		if (params.sack_delay) {
			sp->sackdelay = params.sack_delay;
			sp->param_flags =
				sctp_spp_sackdelay_enable(sp->param_flags);
		}
		if (params.sack_freq == 1) {
			sp->param_flags =
				sctp_spp_sackdelay_disable(sp->param_flags);
		} else if (params.sack_freq > 1) {
			sp->sackfreq = params.sack_freq;
			sp->param_flags =
				sctp_spp_sackdelay_enable(sp->param_flags);
		}
	}

	if (params.sack_assoc_id == SCTP_CURRENT_ASSOC ||
	    params.sack_assoc_id == SCTP_ALL_ASSOC)
		list_for_each_entry(asoc, &sp->ep->asocs, asocs)
			sctp_apply_asoc_delayed_ack(&params, asoc);

	return 0;
}
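
/* Example (user space): request a 200 ms delayed SACK timer and a SACK after
 * every 2 packets for the endpoint defaults (sack_assoc_id left at 0, which
 * affects future associations as described above).  fd and the chosen values
 * are illustrative.
 *
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_sack_info si;
 *
 *	memset(&si, 0, sizeof(si));
 *	si.sack_delay = 200;	// milliseconds, must not exceed 500
 *	si.sack_freq = 2;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *		       &si, sizeof(si)) < 0)
 *		perror("SCTP_DELAYED_SACK");
 */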
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are affected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
{
	struct sctp_initmsg sinit;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof(struct sctp_initmsg))
		return -EINVAL;
	if (copy_from_user(&sinit, optval, optlen))
		return -EFAULT;

	if (sinit.sinit_num_ostreams)
		sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
	if (sinit.sinit_max_instreams)
		sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
	if (sinit.sinit_max_attempts)
		sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
	if (sinit.sinit_max_init_timeo)
		sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;

	return 0;
}
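
/* Example (user space): ask future associations to offer 10 outbound streams
 * and accept up to 10 inbound streams.  The stream counts and fd are
 * illustrative; zeroed fields leave the current defaults untouched.
 *
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_initmsg im;
 *
 *	memset(&im, 0, sizeof(im));
 *	im.sinit_num_ostreams = 10;
 *	im.sinit_max_instreams = 10;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im)) < 0)
 *		perror("SCTP_INITMSG");
 */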
/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 *   Applications that wish to use the sendto() system call may wish to
 *   specify a default set of parameters that would normally be supplied
 *   through the inclusion of ancillary data.  This socket option allows
 *   such an application to set the default sctp_sndrcvinfo structure.
 *   The application that wishes to use this socket option simply passes
 *   in to this call the sctp_sndrcvinfo structure defined in Section
 *   5.2.2.  The input parameters accepted by this call include
 *   sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context and
 *   sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
 *   this call if the caller is using the UDP model.
 */
static int sctp_setsockopt_default_send_param(struct sock *sk,
					      char __user *optval,
					      unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndrcvinfo info;

	if (optlen != sizeof(info))
		return -EINVAL;
	if (copy_from_user(&info, optval, optlen))
		return -EFAULT;
	if (info.sinfo_flags &
	    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
	      SCTP_ABORT | SCTP_EOF))
		return -EINVAL;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		asoc->default_stream = info.sinfo_stream;
		asoc->default_flags = info.sinfo_flags;
		asoc->default_ppid = info.sinfo_ppid;
		asoc->default_context = info.sinfo_context;
		asoc->default_timetolive = info.sinfo_timetolive;

		return 0;
	}

	if (sctp_style(sk, TCP))
		info.sinfo_assoc_id = SCTP_FUTURE_ASSOC;

	if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
	    info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
		sp->default_stream = info.sinfo_stream;
		sp->default_flags = info.sinfo_flags;
		sp->default_ppid = info.sinfo_ppid;
		sp->default_context = info.sinfo_context;
		sp->default_timetolive = info.sinfo_timetolive;
	}

	if (info.sinfo_assoc_id == SCTP_CURRENT_ASSOC ||
	    info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
			asoc->default_stream = info.sinfo_stream;
			asoc->default_flags = info.sinfo_flags;
			asoc->default_ppid = info.sinfo_ppid;
			asoc->default_context = info.sinfo_context;
			asoc->default_timetolive = info.sinfo_timetolive;
		}
	}

	return 0;
}
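
/* Example (user space): make unordered delivery on stream 1 the default for
 * sends that carry no ancillary data.  fd, the stream number and the use of
 * assoc id 0 (endpoint default / future associations) are sketch assumptions.
 *
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_sndrcvinfo d;
 *
 *	memset(&d, 0, sizeof(d));
 *	d.sinfo_stream = 1;
 *	d.sinfo_flags = SCTP_UNORDERED;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		       &d, sizeof(d)) < 0)
 *		perror("SCTP_DEFAULT_SEND_PARAM");
 */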
/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
 * (SCTP_DEFAULT_SNDINFO)
 */
static int sctp_setsockopt_default_sndinfo(struct sock *sk,
					   char __user *optval,
					   unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndinfo info;

	if (optlen != sizeof(info))
		return -EINVAL;
	if (copy_from_user(&info, optval, optlen))
		return -EFAULT;
	if (info.snd_flags &
	    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
	      SCTP_ABORT | SCTP_EOF))
		return -EINVAL;

	asoc = sctp_id2assoc(sk, info.snd_assoc_id);
	if (!asoc && info.snd_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		asoc->default_stream = info.snd_sid;
		asoc->default_flags = info.snd_flags;
		asoc->default_ppid = info.snd_ppid;
		asoc->default_context = info.snd_context;

		return 0;
	}

	if (sctp_style(sk, TCP))
		info.snd_assoc_id = SCTP_FUTURE_ASSOC;

	if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
	    info.snd_assoc_id == SCTP_ALL_ASSOC) {
		sp->default_stream = info.snd_sid;
		sp->default_flags = info.snd_flags;
		sp->default_ppid = info.snd_ppid;
		sp->default_context = info.snd_context;
	}

	if (info.snd_assoc_id == SCTP_CURRENT_ASSOC ||
	    info.snd_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
			asoc->default_stream = info.snd_sid;
			asoc->default_flags = info.snd_flags;
			asoc->default_ppid = info.snd_ppid;
			asoc->default_context = info.snd_context;
		}
	}

	return 0;
}
/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
 *
 * Requests that the local SCTP stack use the enclosed peer address as
 * the association primary.  The enclosed address must be one of the
 * association peer's addresses.
 */
static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
					unsigned int optlen)
{
	struct sctp_prim prim;
	struct sctp_transport *trans;
	struct sctp_af *af;
	int err;

	if (optlen != sizeof(struct sctp_prim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
		return -EFAULT;

	/* Allow security module to validate address but need address len. */
	af = sctp_get_af_specific(prim.ssp_addr.ss_family);
	if (!af)
		return -EINVAL;

	err = security_sctp_bind_connect(sk, SCTP_PRIMARY_ADDR,
					 (struct sockaddr *)&prim.ssp_addr,
					 af->sockaddr_len);
	if (err)
		return err;

	trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
	if (!trans)
		return -EINVAL;

	sctp_assoc_set_primary(trans->asoc, trans);

	return 0;
}
/*
 * 7.1.5 SCTP_NODELAY
 *
 * Turn on/off any Nagle-like algorithm.  This means that packets are
 * generally sent as soon as possible and no unnecessary delays are
 * introduced, at the cost of more packets in the network.  Expects an
 * integer boolean flag.
 */
static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
	return 0;
}
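
/* Example (user space): disable the Nagle-like batching so small messages go
 * out immediately.  fd is assumed to be an already-created SCTP socket.
 *
 *	#include <netinet/sctp.h>
 *
 *	int on = 1;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
 *		perror("SCTP_NODELAY");
 */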
/*
 * 7.1.1 SCTP_RTOINFO
 *
 * The protocol parameters used to initialize and bound retransmission
 * timeout (RTO) are tunable.  The sctp_rtoinfo structure is used to access
 * and modify these parameters.
 * All parameters are time values, in milliseconds.  A value of 0, when
 * modifying the parameters, indicates that the current value should not
 * be changed.
 */
static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
{
	struct sctp_rtoinfo rtoinfo;
	struct sctp_association *asoc;
	unsigned long rto_min, rto_max;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof(struct sctp_rtoinfo))
		return -EINVAL;

	if (copy_from_user(&rtoinfo, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);

	/* Set the values to the specific association */
	if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	rto_max = rtoinfo.srto_max;
	rto_min = rtoinfo.srto_min;

	if (rto_max)
		rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
	else
		rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;

	if (rto_min)
		rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
	else
		rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;

	if (rto_min > rto_max)
		return -EINVAL;

	if (asoc) {
		if (rtoinfo.srto_initial != 0)
			asoc->rto_initial =
				msecs_to_jiffies(rtoinfo.srto_initial);
		asoc->rto_max = rto_max;
		asoc->rto_min = rto_min;
	} else {
		/* If there is no association or the association-id = 0
		 * set the values to the endpoint.
		 */
		if (rtoinfo.srto_initial != 0)
			sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
		sp->rtoinfo.srto_max = rto_max;
		sp->rtoinfo.srto_min = rto_min;
	}

	return 0;
}
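
/* Example (user space): bound the retransmission timeout between 100 ms and
 * 2000 ms with a 500 ms initial value for the endpoint defaults
 * (srto_assoc_id left at 0).  fd and the chosen numbers are illustrative.
 *
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_rtoinfo r;
 *
 *	memset(&r, 0, sizeof(r));
 *	r.srto_initial = 500;
 *	r.srto_min = 100;
 *	r.srto_max = 2000;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &r, sizeof(r)) < 0)
 *		perror("SCTP_RTOINFO");
 */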
/*
 * 7.1.2 SCTP_ASSOCINFO
 *
 * This option is used to tune the maximum retransmission attempts
 * of the association.
 * Returns an error if the new association retransmission value is
 * greater than the sum of the retransmission values of the peer.
 * See [SCTP] for more information.
 */
static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
{
	struct sctp_assocparams assocparams;
	struct sctp_association *asoc;

	if (optlen != sizeof(struct sctp_assocparams))
		return -EINVAL;
	if (copy_from_user(&assocparams, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);

	if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	/* Set the values to the specific association */
	if (asoc) {
		if (assocparams.sasoc_asocmaxrxt != 0) {
			__u32 path_sum = 0;
			int   paths = 0;
			struct sctp_transport *peer_addr;

			list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
					    transports) {
				path_sum += peer_addr->pathmaxrxt;
				paths++;
			}

			/* Only validate asocmaxrxt if we have more than
			 * one path/transport.  We do this because path
			 * retransmissions are only counted when we have more
			 * than one path.
			 */
			if (paths > 1 &&
			    assocparams.sasoc_asocmaxrxt > path_sum)
				return -EINVAL;

			asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
		}

		if (assocparams.sasoc_cookie_life != 0)
			asoc->cookie_life =
				ms_to_ktime(assocparams.sasoc_cookie_life);
	} else {
		/* Set the values to the endpoint */
		struct sctp_sock *sp = sctp_sk(sk);

		if (assocparams.sasoc_asocmaxrxt != 0)
			sp->assocparams.sasoc_asocmaxrxt =
						assocparams.sasoc_asocmaxrxt;
		if (assocparams.sasoc_cookie_life != 0)
			sp->assocparams.sasoc_cookie_life =
						assocparams.sasoc_cookie_life;
	}

	return 0;
}
/*
 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
 *
 * This socket option is a boolean flag which turns on or off mapped V4
 * addresses.  If this option is turned on and the socket is type
 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
 * If this option is turned off, then no mapping will be done of V4
 * addresses and a user will receive both PF_INET6 and PF_INET type
 * addresses on the socket.
 */
static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sp->v4mapped = (val == 0) ? 0 : 1;

	return 0;
}
/*
 * 8.1.16.  Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
 * This option will get or set the maximum size to put in any outgoing
 * SCTP DATA chunk.  If a message is larger than this size it will be
 * fragmented by SCTP into the specified size.  Note that the underlying
 * SCTP implementation may fragment into smaller sized chunks when the
 * PMTU of the underlying association is smaller than the value set by
 * the user.  The default value for this option is '0', which indicates
 * the user is NOT limiting fragmentation and only the PMTU will affect
 * SCTP's choice of DATA chunk size.  Note also that values set larger
 * than the maximum size of an IP datagram will effectively let SCTP
 * control fragmentation (i.e. the same as setting this option to 0).
 *
 * The following structure is used to access and modify this parameter:
 *
 * struct sctp_assoc_value {
 *   sctp_assoc_t assoc_id;
 *   uint32_t assoc_value;
 * };
 *
 * assoc_id:  This parameter is ignored for one-to-one style sockets.
 *    For one-to-many style sockets this parameter indicates which
 *    association the user is performing an action upon.  Note that if
 *    this field's value is zero then the endpoint's default value is
 *    changed (affecting future associations only).
 * assoc_value:  This parameter specifies the maximum size in bytes.
 */
static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int val;

	if (optlen == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in maxseg socket option.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&val, optval, optlen))
			return -EFAULT;
		params.assoc_id = SCTP_FUTURE_ASSOC;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;
		val = params.assoc_value;
	} else {
		return -EINVAL;
	}

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (val) {
		int min_len, max_len;
		__u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
				 sizeof(struct sctp_data_chunk);

		min_len = sctp_min_frag_point(sp, datasize);
		max_len = SCTP_MAX_CHUNK_LEN - datasize;

		if (val < min_len || val > max_len)
			return -EINVAL;
	}

	if (asoc) {
		asoc->user_frag = val;
		sctp_assoc_update_frag_point(asoc);
	} else {
		sp->user_frag = val;
	}

	return 0;
}
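
/* Example (user space): cap outgoing DATA chunks at 1200 bytes for the
 * endpoint defaults (assoc_id left at 0).  fd and the value 1200 are
 * illustrative; the chunk size is still clamped by the PMTU as described
 * above.
 *
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_value = 1200;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av)) < 0)
 *		perror("SCTP_MAXSEG");
 */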
/*
 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
 *
 * Requests that the peer mark the enclosed address as the association
 * primary.  The enclosed address must be one of the association's
 * locally bound addresses.  The following structure is used to make a
 * set peer primary request:
 */
static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
					     unsigned int optlen)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc = NULL;
	struct sctp_setpeerprim prim;
	struct sctp_chunk *chunk;
	struct sctp_af *af;
	int err;

	if (!net->sctp.addip_enable)
		return -EPERM;

	if (optlen != sizeof(struct sctp_setpeerprim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.asconf_capable)
		return -EPERM;

	if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
		return -EPERM;

	if (!sctp_state(asoc, ESTABLISHED))
		return -ENOTCONN;

	af = sctp_get_af_specific(prim.sspp_addr.ss_family);
	if (!af)
		return -EINVAL;

	if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
		return -EADDRNOTAVAIL;

	if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
		return -EADDRNOTAVAIL;

	/* Allow security module to validate address. */
	err = security_sctp_bind_connect(sk, SCTP_SET_PEER_PRIMARY_ADDR,
					 (struct sockaddr *)&prim.sspp_addr,
					 af->sockaddr_len);
	if (err)
		return err;

	/* Create an ASCONF chunk with SET_PRIMARY parameter */
	chunk = sctp_make_asconf_set_prim(asoc,
					  (union sctp_addr *)&prim.sspp_addr);
	if (!chunk)
		return -ENOMEM;

	err = sctp_send_asconf(asoc, chunk);

	pr_debug("%s: we set peer primary addr primitively\n", __func__);

	return err;
}
static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
					    unsigned int optlen)
{
	struct sctp_setadaptation adaptation;

	if (optlen != sizeof(struct sctp_setadaptation))
		return -EINVAL;
	if (copy_from_user(&adaptation, optval, optlen))
		return -EFAULT;

	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;

	return 0;
}
/*
 * 7.1.29.  Set or Get the default context (SCTP_CONTEXT)
 *
 * The context field in the sctp_sndrcvinfo structure is normally only
 * used when a failed message is retrieved, holding the value that was
 * sent down on the actual send call.  This option allows the setting of
 * a default context on an association basis that will be received on
 * reading messages from the peer.  This is especially helpful in the
 * one-2-many model for an application to keep some reference to an
 * internal state machine that is processing messages on the
 * association.  Note that the setting of this value only affects
 * received messages from the peer and does not affect the value that is
 * saved with outbound messages.
 */
static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_assoc_value params;
	struct sctp_association *asoc;

	if (optlen != sizeof(struct sctp_assoc_value))
		return -EINVAL;
	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		asoc->default_rcv_context = params.assoc_value;

		return 0;
	}

	if (sctp_style(sk, TCP))
		params.assoc_id = SCTP_FUTURE_ASSOC;

	if (params.assoc_id == SCTP_FUTURE_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		sp->default_rcv_context = params.assoc_value;

	if (params.assoc_id == SCTP_CURRENT_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		list_for_each_entry(asoc, &sp->ep->asocs, asocs)
			asoc->default_rcv_context = params.assoc_value;

	return 0;
}
/*
 * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 *
 * This option will at a minimum specify if the implementation is doing
 * fragmented interleave.  Fragmented interleave, for a one to many
 * socket, is when subsequent calls to receive a message may return
 * parts of messages from different associations.  Some implementations
 * may allow you to turn this value on or off.  If so, when turned off,
 * no fragment interleave will occur (which will cause head of line
 * blocking amongst multiple associations sharing the same one to many
 * socket).  When this option is turned on, each receive call may
 * come from a different association (thus the user must receive data
 * with the extended calls, e.g. sctp_recvmsg, to keep track of which
 * association each receive belongs to).
 *
 * This option takes a boolean value.  A non-zero value indicates that
 * fragmented interleave is on.  A value of zero indicates that
 * fragmented interleave is off.
 *
 * Note that it is important that an implementation that allows this
 * option to be turned on have it off by default.  Otherwise an unaware
 * application using the one to many model may become confused and act
 * incorrectly.
 */
static int sctp_setsockopt_fragment_interleave(struct sock *sk,
					       char __user *optval,
					       unsigned int optlen)
{
	int val;

	if (optlen != sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->frag_interleave = !!val;

	if (!sctp_sk(sk)->frag_interleave)
		sctp_sk(sk)->ep->intl_enable = 0;

	return 0;
}
/*
 * 8.1.21.  Set or Get the SCTP Partial Delivery Point
 *       (SCTP_PARTIAL_DELIVERY_POINT)
 *
 * This option will set or get the SCTP partial delivery point.  This
 * point is the size of a message where the partial delivery API will be
 * invoked to help free up rwnd space for the peer.  Setting this to a
 * lower value will cause partial deliveries to happen more often.  The
 * call's argument is an integer that sets or gets the partial delivery
 * point.  Note also that the call will fail if the user attempts to set
 * this value larger than the socket receive buffer size.
 *
 * Note that any single message having a length smaller than or equal to
 * the SCTP partial delivery point will be delivered in one single read
 * call as long as the user provided buffer is large enough to hold the
 * message.
 */
static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
						  char __user *optval,
						  unsigned int optlen)
{
	u32 val;

	if (optlen != sizeof(u32))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	/* Note: We double the receive buffer from what the user sets
	 * it to be, also initial rwnd is based on rcvbuf/2.
	 */
	if (val > (sk->sk_rcvbuf >> 1))
		return -EINVAL;

	sctp_sk(sk)->pd_point = val;

	return 0; /* is this the right error code? */
}
/*
 * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
 *
 * This option will allow a user to change the maximum burst of packets
 * that can be emitted by this association.  Note that the default value
 * is 4, and some implementations may restrict this setting so that it
 * can only be lowered.
 *
 * NOTE: This text doesn't seem right.  Do this on a socket basis with
 * future associations inheriting the socket value.
 */
static int sctp_setsockopt_maxburst(struct sock *sk,
				    char __user *optval,
				    unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_assoc_value params;
	struct sctp_association *asoc;

	if (optlen == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in max_burst socket option deprecated.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&params.assoc_value, optval, optlen))
			return -EFAULT;
		params.assoc_id = SCTP_FUTURE_ASSOC;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		asoc->max_burst = params.assoc_value;

		return 0;
	}

	if (sctp_style(sk, TCP))
		params.assoc_id = SCTP_FUTURE_ASSOC;

	if (params.assoc_id == SCTP_FUTURE_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		sp->max_burst = params.assoc_value;

	if (params.assoc_id == SCTP_CURRENT_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		list_for_each_entry(asoc, &sp->ep->asocs, asocs)
			asoc->max_burst = params.assoc_value;

	return 0;
}
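
/* Example (user space): lower the maximum burst from the default of 4 to 2
 * packets for the endpoint defaults (assoc_id left at 0).  fd and the value 2
 * are illustrative.
 *
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_value = 2;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST, &av, sizeof(av)) < 0)
 *		perror("SCTP_MAX_BURST");
 */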
/*
 * 7.1.18.  Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
 *
 * This set option adds a chunk type that the user is requesting to be
 * received only in an authenticated way.  Changes to the list of chunks
 * will only affect future associations on the socket.
 */
static int sctp_setsockopt_auth_chunk(struct sock *sk,
				      char __user *optval,
				      unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunk val;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authchunk))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	switch (val.sauth_chunk) {
	case SCTP_CID_INIT:
	case SCTP_CID_INIT_ACK:
	case SCTP_CID_SHUTDOWN_COMPLETE:
	case SCTP_CID_AUTH:
		return -EINVAL;
	}

	/* add this chunk id to the endpoint */
	return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
}
/*
 * 7.1.19.  Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
 *
 * This option gets or sets the list of HMAC algorithms that the local
 * endpoint requires the peer to use.
 */
static int sctp_setsockopt_hmac_ident(struct sock *sk,
				      char __user *optval,
				      unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_hmacalgo *hmacs;
	u32 idents;
	int err;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen < sizeof(struct sctp_hmacalgo))
		return -EINVAL;
	optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
					     SCTP_AUTH_NUM_HMACS * sizeof(u16));

	hmacs = memdup_user(optval, optlen);
	if (IS_ERR(hmacs))
		return PTR_ERR(hmacs);

	idents = hmacs->shmac_num_idents;
	if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
	    (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
		err = -EINVAL;
		goto out;
	}

	err = sctp_auth_ep_set_hmacs(ep, hmacs);
out:
	kfree(hmacs);
	return err;
}
/*
 * 7.1.20.  Set a shared key (SCTP_AUTH_KEY)
 *
 * This option will set a shared secret key which is used to build an
 * association shared key.
 */
static int sctp_setsockopt_auth_key(struct sock *sk,
				    char __user *optval,
				    unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authkey *authkey;
	struct sctp_association *asoc;
	int ret = -EINVAL;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen <= sizeof(struct sctp_authkey))
		return -EINVAL;
	/* authkey->sca_keylength is u16, so optlen can't be bigger than
	 * this.
	 */
	optlen = min_t(unsigned int, optlen, USHRT_MAX + sizeof(*authkey));

	authkey = memdup_user(optval, optlen);
	if (IS_ERR(authkey))
		return PTR_ERR(authkey);

	if (authkey->sca_keylength > optlen - sizeof(*authkey))
		goto out;

	asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
	if (!asoc && authkey->sca_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		goto out;

	if (asoc) {
		ret = sctp_auth_set_key(ep, asoc, authkey);
		goto out;
	}

	if (sctp_style(sk, TCP))
		authkey->sca_assoc_id = SCTP_FUTURE_ASSOC;

	if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
	    authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
		ret = sctp_auth_set_key(ep, asoc, authkey);
		if (ret)
			goto out;
	}

	ret = 0;

	if (authkey->sca_assoc_id == SCTP_CURRENT_ASSOC ||
	    authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &ep->asocs, asocs) {
			int res = sctp_auth_set_key(ep, asoc, authkey);

			if (res && !ret)
				ret = res;
		}
	}

out:
	memzero_explicit(authkey, optlen);
	kfree(authkey);
	return ret;
}
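
/* Example (user space): install a 16-byte shared key as key number 1 for the
 * endpoint defaults (sca_assoc_id left at 0).  fd, the key number and the
 * all-zero placeholder key bytes are assumptions; real deployments would
 * provision the key material out of band.
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	unsigned char secret[16] = { 0 };	// placeholder key material
 *	struct sctp_authkey *ak = calloc(1, sizeof(*ak) + sizeof(secret));
 *
 *	ak->sca_keynumber = 1;
 *	ak->sca_keylength = sizeof(secret);
 *	memcpy(ak->sca_key, secret, sizeof(secret));
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_KEY,
 *		       ak, sizeof(*ak) + sizeof(secret)) < 0)
 *		perror("SCTP_AUTH_KEY");
 *	free(ak);
 */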
/*
 * 7.1.21.  Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
 *
 * This option will get or set the active shared key to be used to build
 * the association shared key.
 */
static int sctp_setsockopt_active_key(struct sock *sk,
				      char __user *optval,
				      unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_association *asoc;
	struct sctp_authkeyid val;
	int ret = 0;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);

	if (sctp_style(sk, TCP))
		val.scact_assoc_id = SCTP_FUTURE_ASSOC;

	if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
	    val.scact_assoc_id == SCTP_ALL_ASSOC) {
		ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
		if (ret)
			return ret;
	}

	if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
	    val.scact_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &ep->asocs, asocs) {
			int res = sctp_auth_set_active_key(ep, asoc,
							   val.scact_keynumber);

			if (res && !ret)
				ret = res;
		}
	}

	return ret;
}
/*
 * 7.1.22.  Delete a shared key (SCTP_AUTH_DELETE_KEY)
 *
 * This set option will delete a shared secret key from use.
 */
static int sctp_setsockopt_del_key(struct sock *sk,
				   char __user *optval,
				   unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_association *asoc;
	struct sctp_authkeyid val;
	int ret = 0;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);

	if (sctp_style(sk, TCP))
		val.scact_assoc_id = SCTP_FUTURE_ASSOC;

	if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
	    val.scact_assoc_id == SCTP_ALL_ASSOC) {
		ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
		if (ret)
			return ret;
	}

	if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
	    val.scact_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &ep->asocs, asocs) {
			int res = sctp_auth_del_key_id(ep, asoc,
						       val.scact_keynumber);

			if (res && !ret)
				ret = res;
		}
	}

	return ret;
}
/*
 * 8.3.4  Deactivate a Shared Key (SCTP_AUTH_DEACTIVATE_KEY)
 *
 * This set option will deactivate a shared secret key.
 */
static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
					  unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_association *asoc;
	struct sctp_authkeyid val;
	int ret = 0;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);

	if (sctp_style(sk, TCP))
		val.scact_assoc_id = SCTP_FUTURE_ASSOC;

	if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
	    val.scact_assoc_id == SCTP_ALL_ASSOC) {
		ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
		if (ret)
			return ret;
	}

	if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
	    val.scact_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &ep->asocs, asocs) {
			int res = sctp_auth_deact_key_id(ep, asoc,
							 val.scact_keynumber);

			if (res && !ret)
				ret = res;
		}
	}

	return ret;
}
/*
 * 8.1.23 SCTP_AUTO_ASCONF
 *
 * This option will enable or disable the use of the automatic generation of
 * ASCONF chunks to add and delete addresses to an existing association.  Note
 * that this option has two caveats, namely: a) it only affects sockets that
 * are bound to all addresses available to the SCTP stack, and b) the system
 * administrator may have an overriding control that turns the ASCONF feature
 * off no matter what setting the socket option may have.
 * This option expects an integer boolean flag, where a non-zero value turns on
 * the option, and a zero value turns off the option.
 * Note: in this implementation the socket option overrides the default set by
 * sysctl, matching the FreeBSD implementation.
 */
static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
				       unsigned int optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;
	if (!sctp_is_ep_boundall(sk) && val)
		return -EINVAL;
	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
		return 0;

	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
	if (val == 0 && sp->do_auto_asconf) {
		list_del(&sp->auto_asconf_list);
		sp->do_auto_asconf = 0;
	} else if (val && !sp->do_auto_asconf) {
		list_add_tail(&sp->auto_asconf_list,
			      &sock_net(sk)->sctp.auto_asconf_splist);
		sp->do_auto_asconf = 1;
	}
	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
	return 0;
}
/*
 * SCTP_PEER_ADDR_THLDS
 *
 * This option allows us to alter the partially failed threshold for one or all
 * transports in an association.  See Section 6.1 of:
 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
 */
static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
					    char __user *optval,
					    unsigned int optlen)
{
	struct sctp_paddrthlds val;
	struct sctp_transport *trans;
	struct sctp_association *asoc;

	if (optlen < sizeof(struct sctp_paddrthlds))
		return -EINVAL;
	if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
			   sizeof(struct sctp_paddrthlds)))
		return -EFAULT;

	if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
		trans = sctp_addr_id2transport(sk, &val.spt_address,
					       val.spt_assoc_id);
		if (!trans)
			return -ENOENT;

		if (val.spt_pathmaxrxt)
			trans->pathmaxrxt = val.spt_pathmaxrxt;
		trans->pf_retrans = val.spt_pathpfthld;

		return 0;
	}

	asoc = sctp_id2assoc(sk, val.spt_assoc_id);
	if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
				    transports) {
			if (val.spt_pathmaxrxt)
				trans->pathmaxrxt = val.spt_pathmaxrxt;
			trans->pf_retrans = val.spt_pathpfthld;
		}

		if (val.spt_pathmaxrxt)
			asoc->pathmaxrxt = val.spt_pathmaxrxt;
		asoc->pf_retrans = val.spt_pathpfthld;
	} else {
		struct sctp_sock *sp = sctp_sk(sk);

		if (val.spt_pathmaxrxt)
			sp->pathmaxrxt = val.spt_pathmaxrxt;
		sp->pf_retrans = val.spt_pathpfthld;
	}

	return 0;
}
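
/* Example (user space): mark peer addresses "potentially failed" after 2
 * unacknowledged retransmissions and unreachable after 5, for every transport
 * of one association (spt_address left as the wildcard).  fd, the assoc_id
 * value and the thresholds are illustrative assumptions.
 *
 *	#include <string.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_paddrthlds th;
 *
 *	memset(&th, 0, sizeof(th));
 *	th.spt_assoc_id = assoc_id;	// e.g. from an SCTP_ASSOC_CHANGE event
 *	th.spt_pathpfthld = 2;
 *	th.spt_pathmaxrxt = 5;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		       &th, sizeof(th)) < 0)
 *		perror("SCTP_PEER_ADDR_THLDS");
 */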
static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;

	return 0;
}

static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;

	return 0;
}
static int sctp_setsockopt_pr_supported(struct sock *sk,
					char __user *optval,
					unsigned int optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;

	if (optlen != sizeof(params))
		return -EINVAL;

	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;

	return 0;
}
static int sctp_setsockopt_default_prinfo(struct sock *sk,
					  char __user *optval,
					  unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_default_prinfo info;
	struct sctp_association *asoc;
	int retval = -EINVAL;

	if (optlen != sizeof(info))
		goto out;

	if (copy_from_user(&info, optval, sizeof(info))) {
		retval = -EFAULT;
		goto out;
	}

	if (info.pr_policy & ~SCTP_PR_SCTP_MASK)
		goto out;

	if (info.pr_policy == SCTP_PR_SCTP_NONE)
		info.pr_value = 0;

	asoc = sctp_id2assoc(sk, info.pr_assoc_id);
	if (!asoc && info.pr_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		goto out;

	retval = 0;

	if (asoc) {
		SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
		asoc->default_timetolive = info.pr_value;
		goto out;
	}

	if (sctp_style(sk, TCP))
		info.pr_assoc_id = SCTP_FUTURE_ASSOC;

	if (info.pr_assoc_id == SCTP_FUTURE_ASSOC ||
	    info.pr_assoc_id == SCTP_ALL_ASSOC) {
		SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
		sp->default_timetolive = info.pr_value;
	}

	if (info.pr_assoc_id == SCTP_CURRENT_ASSOC ||
	    info.pr_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
			SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
			asoc->default_timetolive = info.pr_value;
		}
	}

out:
	return retval;
}
static int sctp_setsockopt_reconfig_supported(struct sock *sk,
					      char __user *optval,
					      unsigned int optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EINVAL;

	if (optlen != sizeof(params))
		goto out;

	if (copy_from_user(&params, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		goto out;

	sctp_sk(sk)->ep->reconf_enable = !!params.assoc_value;

	retval = 0;

out:
	return retval;
}
static int sctp_setsockopt_enable_strreset(struct sock *sk,
					   char __user *optval,
					   unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EINVAL;

	if (optlen != sizeof(params))
		goto out;

	if (copy_from_user(&params, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		goto out;

	retval = 0;

	if (asoc) {
		asoc->strreset_enable = params.assoc_value;
		goto out;
	}

	if (sctp_style(sk, TCP))
		params.assoc_id = SCTP_FUTURE_ASSOC;

	if (params.assoc_id == SCTP_FUTURE_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		ep->strreset_enable = params.assoc_value;

	if (params.assoc_id == SCTP_CURRENT_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		list_for_each_entry(asoc, &ep->asocs, asocs)
			asoc->strreset_enable = params.assoc_value;

out:
	return retval;
}
static int sctp_setsockopt_reset_streams(struct sock *sk,
					 char __user *optval,
					 unsigned int optlen)
{
	struct sctp_reset_streams *params;
	struct sctp_association *asoc;
	int retval = -EINVAL;

	if (optlen < sizeof(*params))
		return -EINVAL;
	/* srs_number_streams is u16, so optlen can't be bigger than this. */
	optlen = min_t(unsigned int, optlen, USHRT_MAX +
					     sizeof(__u16) * sizeof(*params));

	params = memdup_user(optval, optlen);
	if (IS_ERR(params))
		return PTR_ERR(params);

	if (params->srs_number_streams * sizeof(__u16) >
	    optlen - sizeof(*params))
		goto out;

	asoc = sctp_id2assoc(sk, params->srs_assoc_id);
	if (!asoc)
		goto out;

	retval = sctp_send_reset_streams(asoc, params);

out:
	kfree(params);
	return retval;
}
static int sctp_setsockopt_reset_assoc(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	struct sctp_association *asoc;
	sctp_assoc_t associd;
	int retval = -EINVAL;

	if (optlen != sizeof(associd))
		goto out;

	if (copy_from_user(&associd, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	asoc = sctp_id2assoc(sk, associd);
	if (!asoc)
		goto out;

	retval = sctp_send_reset_assoc(asoc);

out:
	return retval;
}
static int sctp_setsockopt_add_streams(struct sock *sk,
				       char __user *optval,
				       unsigned int optlen)
{
	struct sctp_association *asoc;
	struct sctp_add_streams params;
	int retval = -EINVAL;

	if (optlen != sizeof(params))
		goto out;

	if (copy_from_user(&params, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	asoc = sctp_id2assoc(sk, params.sas_assoc_id);
	if (!asoc)
		goto out;

	retval = sctp_send_add_streams(asoc, &params);

out:
	return retval;
}
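/* Userspace sketch (illustrative only, not from this file): grow an existing
 * association by two outgoing streams.  Assumes <netinet/sctp.h>, a socket
 * 'sd' and a known association id 'aid'; stream reconfiguration must have
 * been enabled beforehand (SCTP_RECONFIG_SUPPORTED / SCTP_ENABLE_STREAM_RESET).
 *
 *	struct sctp_add_streams sas = {
 *		.sas_assoc_id = aid,
 *		.sas_instrms  = 0,
 *		.sas_outstrms = 2,
 *	};
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_ADD_STREAMS, &sas, sizeof(sas));
 */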
static int sctp_setsockopt_scheduler(struct sock *sk,
				     char __user *optval,
				     unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_assoc_value params;
	int retval = 0;

	if (optlen < sizeof(params))
		return -EINVAL;

	optlen = sizeof(params);
	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	if (params.assoc_value > SCTP_SS_MAX)
		return -EINVAL;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		return sctp_sched_set_sched(asoc, params.assoc_value);

	if (sctp_style(sk, TCP))
		params.assoc_id = SCTP_FUTURE_ASSOC;

	if (params.assoc_id == SCTP_FUTURE_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		sp->default_ss = params.assoc_value;

	if (params.assoc_id == SCTP_CURRENT_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
			int ret = sctp_sched_set_sched(asoc,
						       params.assoc_value);

			if (ret && !retval)
				retval = ret;
		}
	}

	return retval;
}
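/* Userspace sketch (not from this file): select the priority-based stream
 * scheduler as the default for future associations; per-stream weights can
 * then be assigned with SCTP_STREAM_SCHEDULER_VALUE, handled by the next
 * function below.  Assumes <netinet/sctp.h> and a socket 'sd'.
 *
 *	struct sctp_assoc_value av = {
 *		.assoc_id    = SCTP_FUTURE_ASSOC,
 *		.assoc_value = SCTP_SS_PRIO,
 *	};
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER, &av, sizeof(av));
 */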
static int sctp_setsockopt_scheduler_value(struct sock *sk,
					   char __user *optval,
					   unsigned int optlen)
{
	struct sctp_stream_value params;
	struct sctp_association *asoc;
	int retval = -EINVAL;

	if (optlen < sizeof(params))
		goto out;

	optlen = sizeof(params);
	if (copy_from_user(&params, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_CURRENT_ASSOC &&
	    sctp_style(sk, UDP))
		goto out;

	if (asoc) {
		retval = sctp_sched_set_value(asoc, params.stream_id,
					      params.stream_value, GFP_KERNEL);
		goto out;
	}

	retval = 0;

	list_for_each_entry(asoc, &sctp_sk(sk)->ep->asocs, asocs) {
		int ret = sctp_sched_set_value(asoc, params.stream_id,
					       params.stream_value, GFP_KERNEL);
		if (ret && !retval) /* try to return the 1st error. */
			retval = ret;
	}

out:
	return retval;
}
static int sctp_setsockopt_interleaving_supported(struct sock *sk,
						  char __user *optval,
						  unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EINVAL;

	if (optlen < sizeof(params))
		goto out;

	optlen = sizeof(params);
	if (copy_from_user(&params, optval, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		goto out;

	if (!sock_net(sk)->sctp.intl_enable || !sp->frag_interleave) {
		retval = -EPERM;
		goto out;
	}

	sp->ep->intl_enable = !!params.assoc_value;

	retval = 0;

out:
	return retval;
}
static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
				      unsigned int optlen)
{
	int val;

	if (!sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (sctp_sk(sk)->ep->base.bind_addr.port)
		return -EFAULT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->reuse = !!val;

	return 0;
}
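/* Userspace sketch (not from this file): SCTP_REUSE_PORT is only accepted on
 * a one-to-one (SOCK_STREAM) SCTP socket before it is bound, as enforced by
 * the handler above.  Assumes <netinet/sctp.h>.
 *
 *	int sd  = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
 *	int val = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_REUSE_PORT, &val, sizeof(val));
 *	// bind()/listen() only after the option has been set
 */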
static int sctp_assoc_ulpevent_type_set(struct sctp_event *param,
					struct sctp_association *asoc)
{
	struct sctp_ulpevent *event;

	sctp_ulpevent_type_set(&asoc->subscribe, param->se_type, param->se_on);

	if (param->se_type == SCTP_SENDER_DRY_EVENT && param->se_on) {
		if (sctp_outq_is_empty(&asoc->outqueue)) {
			event = sctp_ulpevent_make_sender_dry_event(asoc,
					GFP_USER | __GFP_NOWARN);
			if (!event)
				return -ENOMEM;

			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
		}
	}

	return 0;
}
static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
				 unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_event param;
	int retval = 0;

	if (optlen < sizeof(param))
		return -EINVAL;

	optlen = sizeof(param);
	if (copy_from_user(&param, optval, optlen))
		return -EFAULT;

	if (param.se_type < SCTP_SN_TYPE_BASE ||
	    param.se_type > SCTP_SN_TYPE_MAX)
		return -EINVAL;

	asoc = sctp_id2assoc(sk, param.se_assoc_id);
	if (!asoc && param.se_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		return sctp_assoc_ulpevent_type_set(&param, asoc);

	if (sctp_style(sk, TCP))
		param.se_assoc_id = SCTP_FUTURE_ASSOC;

	if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
	    param.se_assoc_id == SCTP_ALL_ASSOC)
		sctp_ulpevent_type_set(&sp->subscribe,
				       param.se_type, param.se_on);

	if (param.se_assoc_id == SCTP_CURRENT_ASSOC ||
	    param.se_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
			int ret = sctp_assoc_ulpevent_type_set(&param, asoc);

			if (ret && !retval)
				retval = ret;
		}
	}

	return retval;
}
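/* Userspace sketch (not from this file): subscribe to association change
 * notifications with the fine-grained SCTP_EVENT option handled above.
 * Assumes a uapi header new enough to provide struct sctp_event and a
 * socket 'sd'.
 *
 *	struct sctp_event ev = {
 *		.se_assoc_id = SCTP_FUTURE_ASSOC,
 *		.se_type     = SCTP_ASSOC_CHANGE,
 *		.se_on       = 1,
 *	};
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
 */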
/* API 6.2 setsockopt(), getsockopt()
 *
 * Applications use setsockopt() and getsockopt() to set or retrieve
 * socket options.  Socket options are used to change the default
 * behavior of socket calls.  They are described in Section 7.
 *
 * The syntax is:
 *
 *   ret = getsockopt(int sd, int level, int optname, void __user *optval,
 *                    int __user *optlen);
 *   ret = setsockopt(int sd, int level, int optname, const void __user *optval,
 *                    int optlen);
 *
 *   sd      - the socket descriptor.
 *   level   - set to IPPROTO_SCTP for all SCTP options.
 *   optname - the option name.
 *   optval  - the buffer to store the value of the option.
 *   optlen  - the size of the buffer.
 */
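/* For illustration only (not part of the original comment): a typical option
 * set from userspace that ends up in the switch below, assuming
 * <netinet/sctp.h> and an SCTP socket 'sd'.
 *
 *	int on = 1;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
 *		perror("SCTP_NODELAY");
 */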
4605 static int sctp_setsockopt(struct sock
*sk
, int level
, int optname
,
4606 char __user
*optval
, unsigned int optlen
)
4610 pr_debug("%s: sk:%p, optname:%d\n", __func__
, sk
, optname
);
4612 /* I can hardly begin to describe how wrong this is. This is
4613 * so broken as to be worse than useless. The API draft
4614 * REALLY is NOT helpful here... I am not convinced that the
4615 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
4616 * are at all well-founded.
4618 if (level
!= SOL_SCTP
) {
4619 struct sctp_af
*af
= sctp_sk(sk
)->pf
->af
;
4620 retval
= af
->setsockopt(sk
, level
, optname
, optval
, optlen
);
4627 case SCTP_SOCKOPT_BINDX_ADD
:
4628 /* 'optlen' is the size of the addresses buffer. */
4629 retval
= sctp_setsockopt_bindx(sk
, (struct sockaddr __user
*)optval
,
4630 optlen
, SCTP_BINDX_ADD_ADDR
);
4633 case SCTP_SOCKOPT_BINDX_REM
:
4634 /* 'optlen' is the size of the addresses buffer. */
4635 retval
= sctp_setsockopt_bindx(sk
, (struct sockaddr __user
*)optval
,
4636 optlen
, SCTP_BINDX_REM_ADDR
);
4639 case SCTP_SOCKOPT_CONNECTX_OLD
:
4640 /* 'optlen' is the size of the addresses buffer. */
4641 retval
= sctp_setsockopt_connectx_old(sk
,
4642 (struct sockaddr __user
*)optval
,
4646 case SCTP_SOCKOPT_CONNECTX
:
4647 /* 'optlen' is the size of the addresses buffer. */
4648 retval
= sctp_setsockopt_connectx(sk
,
4649 (struct sockaddr __user
*)optval
,
4653 case SCTP_DISABLE_FRAGMENTS
:
4654 retval
= sctp_setsockopt_disable_fragments(sk
, optval
, optlen
);
4658 retval
= sctp_setsockopt_events(sk
, optval
, optlen
);
4661 case SCTP_AUTOCLOSE
:
4662 retval
= sctp_setsockopt_autoclose(sk
, optval
, optlen
);
4665 case SCTP_PEER_ADDR_PARAMS
:
4666 retval
= sctp_setsockopt_peer_addr_params(sk
, optval
, optlen
);
4669 case SCTP_DELAYED_SACK
:
4670 retval
= sctp_setsockopt_delayed_ack(sk
, optval
, optlen
);
4672 case SCTP_PARTIAL_DELIVERY_POINT
:
4673 retval
= sctp_setsockopt_partial_delivery_point(sk
, optval
, optlen
);
4677 retval
= sctp_setsockopt_initmsg(sk
, optval
, optlen
);
4679 case SCTP_DEFAULT_SEND_PARAM
:
4680 retval
= sctp_setsockopt_default_send_param(sk
, optval
,
4683 case SCTP_DEFAULT_SNDINFO
:
4684 retval
= sctp_setsockopt_default_sndinfo(sk
, optval
, optlen
);
4686 case SCTP_PRIMARY_ADDR
:
4687 retval
= sctp_setsockopt_primary_addr(sk
, optval
, optlen
);
4689 case SCTP_SET_PEER_PRIMARY_ADDR
:
4690 retval
= sctp_setsockopt_peer_primary_addr(sk
, optval
, optlen
);
4693 retval
= sctp_setsockopt_nodelay(sk
, optval
, optlen
);
4696 retval
= sctp_setsockopt_rtoinfo(sk
, optval
, optlen
);
4698 case SCTP_ASSOCINFO
:
4699 retval
= sctp_setsockopt_associnfo(sk
, optval
, optlen
);
4701 case SCTP_I_WANT_MAPPED_V4_ADDR
:
4702 retval
= sctp_setsockopt_mappedv4(sk
, optval
, optlen
);
4705 retval
= sctp_setsockopt_maxseg(sk
, optval
, optlen
);
4707 case SCTP_ADAPTATION_LAYER
:
4708 retval
= sctp_setsockopt_adaptation_layer(sk
, optval
, optlen
);
4711 retval
= sctp_setsockopt_context(sk
, optval
, optlen
);
4713 case SCTP_FRAGMENT_INTERLEAVE
:
4714 retval
= sctp_setsockopt_fragment_interleave(sk
, optval
, optlen
);
4716 case SCTP_MAX_BURST
:
4717 retval
= sctp_setsockopt_maxburst(sk
, optval
, optlen
);
4719 case SCTP_AUTH_CHUNK
:
4720 retval
= sctp_setsockopt_auth_chunk(sk
, optval
, optlen
);
4722 case SCTP_HMAC_IDENT
:
4723 retval
= sctp_setsockopt_hmac_ident(sk
, optval
, optlen
);
4726 retval
= sctp_setsockopt_auth_key(sk
, optval
, optlen
);
4728 case SCTP_AUTH_ACTIVE_KEY
:
4729 retval
= sctp_setsockopt_active_key(sk
, optval
, optlen
);
4731 case SCTP_AUTH_DELETE_KEY
:
4732 retval
= sctp_setsockopt_del_key(sk
, optval
, optlen
);
4734 case SCTP_AUTH_DEACTIVATE_KEY
:
4735 retval
= sctp_setsockopt_deactivate_key(sk
, optval
, optlen
);
4737 case SCTP_AUTO_ASCONF
:
4738 retval
= sctp_setsockopt_auto_asconf(sk
, optval
, optlen
);
4740 case SCTP_PEER_ADDR_THLDS
:
4741 retval
= sctp_setsockopt_paddr_thresholds(sk
, optval
, optlen
);
4743 case SCTP_RECVRCVINFO
:
4744 retval
= sctp_setsockopt_recvrcvinfo(sk
, optval
, optlen
);
4746 case SCTP_RECVNXTINFO
:
4747 retval
= sctp_setsockopt_recvnxtinfo(sk
, optval
, optlen
);
4749 case SCTP_PR_SUPPORTED
:
4750 retval
= sctp_setsockopt_pr_supported(sk
, optval
, optlen
);
4752 case SCTP_DEFAULT_PRINFO
:
4753 retval
= sctp_setsockopt_default_prinfo(sk
, optval
, optlen
);
4755 case SCTP_RECONFIG_SUPPORTED
:
4756 retval
= sctp_setsockopt_reconfig_supported(sk
, optval
, optlen
);
4758 case SCTP_ENABLE_STREAM_RESET
:
4759 retval
= sctp_setsockopt_enable_strreset(sk
, optval
, optlen
);
4761 case SCTP_RESET_STREAMS
:
4762 retval
= sctp_setsockopt_reset_streams(sk
, optval
, optlen
);
4764 case SCTP_RESET_ASSOC
:
4765 retval
= sctp_setsockopt_reset_assoc(sk
, optval
, optlen
);
4767 case SCTP_ADD_STREAMS
:
4768 retval
= sctp_setsockopt_add_streams(sk
, optval
, optlen
);
4770 case SCTP_STREAM_SCHEDULER
:
4771 retval
= sctp_setsockopt_scheduler(sk
, optval
, optlen
);
4773 case SCTP_STREAM_SCHEDULER_VALUE
:
4774 retval
= sctp_setsockopt_scheduler_value(sk
, optval
, optlen
);
4776 case SCTP_INTERLEAVING_SUPPORTED
:
4777 retval
= sctp_setsockopt_interleaving_supported(sk
, optval
,
4780 case SCTP_REUSE_PORT
:
4781 retval
= sctp_setsockopt_reuse_port(sk
, optval
, optlen
);
4784 retval
= sctp_setsockopt_event(sk
, optval
, optlen
);
4787 retval
= -ENOPROTOOPT
;
/* API 3.1.6 connect() - UDP Style Syntax
 *
 * An application may use the connect() call in the UDP model to initiate an
 * association without sending data.
 *
 * The syntax is:
 *
 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
 *
 * sd: the socket descriptor to have a new association added to.
 *
 * nam: the address structure (either struct sockaddr_in or struct
 *    sockaddr_in6 defined in RFC2553 [7]).
 *
 * len: the size of the address.
 */
static int sctp_connect(struct sock *sk, struct sockaddr *addr,
			int addr_len, int flags)
{
	struct sctp_af *af;
	int err = -EINVAL;

	lock_sock(sk);
	pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Validate addr_len before calling common connect/connectx routine. */
	af = sctp_get_af_specific(addr->sa_family);
	if (af && addr_len >= af->sockaddr_len)
		err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);

	release_sock(sk);
	return err;
}

int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
		      int addr_len, int flags)
{
	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;

	if (uaddr->sa_family == AF_UNSPEC)
		return -EOPNOTSUPP;

	return sctp_connect(sock->sk, uaddr, addr_len, flags);
}

/* FIXME: Write comments. */
static int sctp_disconnect(struct sock *sk, int flags)
{
	return -EOPNOTSUPP; /* STUB */
}
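/* Userspace sketch (not from this file): implicit association setup with
 * connect() on a one-to-many socket, as described in the comment above.
 * Assumes the usual socket headers plus <arpa/inet.h>, IPv4, and a peer
 * listening on port 5000 at the documentation address 192.0.2.1.
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in peer = { .sin_family = AF_INET,
 *				    .sin_port   = htons(5000) };
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *	connect(sd, (struct sockaddr *)&peer, sizeof(peer));
 */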
/* 4.1.4 accept() - TCP Style Syntax
 *
 * Applications use the accept() call to remove an established SCTP
 * association from the accept queue of the endpoint.  A new socket
 * descriptor will be returned from accept() to represent the newly
 * formed association.
 */
static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sock *newsk = NULL;
	struct sctp_association *asoc;
	long timeo;
	int error = 0;

	lock_sock(sk);

	sp = sctp_sk(sk);
	ep = sp->ep;

	if (!sctp_style(sk, TCP)) {
		error = -EOPNOTSUPP;
		goto out;
	}

	if (!sctp_sstate(sk, LISTENING)) {
		error = -EINVAL;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	error = sctp_wait_for_accept(sk, timeo);
	if (error)
		goto out;

	/* We treat the list of associations on the endpoint as the accept
	 * queue and pick the first association on the list.
	 */
	asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);

	newsk = sp->pf->create_accept_sk(sk, asoc, kern);
	if (!newsk) {
		error = -ENOMEM;
		goto out;
	}

	/* Populate the fields of the newsk from the oldsk and migrate the
	 * asoc to the newsk.
	 */
	error = sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
	if (error) {
		sk_common_release(newsk);
		newsk = NULL;
	}

out:
	release_sock(sk);
	*err = error;
	return newsk;
}
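/* Userspace sketch (not from this file): a TCP-style accept loop that
 * exercises the path above.  Assumes an IPv4 one-to-one socket already bound
 * to a local address.
 *
 *	int lsd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	// bind(lsd, ...);
 *	listen(lsd, 5);
 *	for (;;) {
 *		int csd = accept(lsd, NULL, NULL);
 *		if (csd < 0)
 *			break;
 *		// each csd represents one established association
 *	}
 */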
4913 /* The SCTP ioctl handler. */
4914 static int sctp_ioctl(struct sock
*sk
, int cmd
, unsigned long arg
)
4921 * SEQPACKET-style sockets in LISTENING state are valid, for
4922 * SCTP, so only discard TCP-style sockets in LISTENING state.
4924 if (sctp_style(sk
, TCP
) && sctp_sstate(sk
, LISTENING
))
4929 struct sk_buff
*skb
;
4930 unsigned int amount
= 0;
4932 skb
= skb_peek(&sk
->sk_receive_queue
);
4935 * We will only return the amount of this packet since
4936 * that is all that will be read.
4940 rc
= put_user(amount
, (int __user
*)arg
);
4952 /* This is the function which gets called during socket creation to
4953 * initialized the SCTP-specific portion of the sock.
4954 * The sock structure should already be zero-filled memory.
4956 static int sctp_init_sock(struct sock
*sk
)
4958 struct net
*net
= sock_net(sk
);
4959 struct sctp_sock
*sp
;
4961 pr_debug("%s: sk:%p\n", __func__
, sk
);
4965 /* Initialize the SCTP per socket area. */
4966 switch (sk
->sk_type
) {
4967 case SOCK_SEQPACKET
:
4968 sp
->type
= SCTP_SOCKET_UDP
;
4971 sp
->type
= SCTP_SOCKET_TCP
;
4974 return -ESOCKTNOSUPPORT
;
4977 sk
->sk_gso_type
= SKB_GSO_SCTP
;
4979 /* Initialize default send parameters. These parameters can be
4980 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
4982 sp
->default_stream
= 0;
4983 sp
->default_ppid
= 0;
4984 sp
->default_flags
= 0;
4985 sp
->default_context
= 0;
4986 sp
->default_timetolive
= 0;
4988 sp
->default_rcv_context
= 0;
4989 sp
->max_burst
= net
->sctp
.max_burst
;
4991 sp
->sctp_hmac_alg
= net
->sctp
.sctp_hmac_alg
;
4993 /* Initialize default setup parameters. These parameters
4994 * can be modified with the SCTP_INITMSG socket option or
4995 * overridden by the SCTP_INIT CMSG.
4997 sp
->initmsg
.sinit_num_ostreams
= sctp_max_outstreams
;
4998 sp
->initmsg
.sinit_max_instreams
= sctp_max_instreams
;
4999 sp
->initmsg
.sinit_max_attempts
= net
->sctp
.max_retrans_init
;
5000 sp
->initmsg
.sinit_max_init_timeo
= net
->sctp
.rto_max
;
5002 /* Initialize default RTO related parameters. These parameters can
5003 * be modified for with the SCTP_RTOINFO socket option.
5005 sp
->rtoinfo
.srto_initial
= net
->sctp
.rto_initial
;
5006 sp
->rtoinfo
.srto_max
= net
->sctp
.rto_max
;
5007 sp
->rtoinfo
.srto_min
= net
->sctp
.rto_min
;
5009 /* Initialize default association related parameters. These parameters
5010 * can be modified with the SCTP_ASSOCINFO socket option.
5012 sp
->assocparams
.sasoc_asocmaxrxt
= net
->sctp
.max_retrans_association
;
5013 sp
->assocparams
.sasoc_number_peer_destinations
= 0;
5014 sp
->assocparams
.sasoc_peer_rwnd
= 0;
5015 sp
->assocparams
.sasoc_local_rwnd
= 0;
5016 sp
->assocparams
.sasoc_cookie_life
= net
->sctp
.valid_cookie_life
;
5018 /* Initialize default event subscriptions. By default, all the
5023 /* Default Peer Address Parameters. These defaults can
5024 * be modified via SCTP_PEER_ADDR_PARAMS
5026 sp
->hbinterval
= net
->sctp
.hb_interval
;
5027 sp
->pathmaxrxt
= net
->sctp
.max_retrans_path
;
5028 sp
->pf_retrans
= net
->sctp
.pf_retrans
;
5029 sp
->pathmtu
= 0; /* allow default discovery */
5030 sp
->sackdelay
= net
->sctp
.sack_timeout
;
5032 sp
->param_flags
= SPP_HB_ENABLE
|
5034 SPP_SACKDELAY_ENABLE
;
5035 sp
->default_ss
= SCTP_SS_DEFAULT
;
5037 /* If enabled no SCTP message fragmentation will be performed.
5038 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
5040 sp
->disable_fragments
= 0;
5042 /* Enable Nagle algorithm by default. */
5045 sp
->recvrcvinfo
= 0;
5046 sp
->recvnxtinfo
= 0;
5048 /* Enable by default. */
5051 /* Auto-close idle associations after the configured
5052 * number of seconds. A value of 0 disables this
5053 * feature. Configure through the SCTP_AUTOCLOSE socket option,
5054 * for UDP-style sockets only.
5058 /* User specified fragmentation limit. */
5061 sp
->adaptation_ind
= 0;
5063 sp
->pf
= sctp_get_pf_specific(sk
->sk_family
);
5065 /* Control variables for partial data delivery. */
5066 atomic_set(&sp
->pd_mode
, 0);
5067 skb_queue_head_init(&sp
->pd_lobby
);
5068 sp
->frag_interleave
= 0;
5070 /* Create a per socket endpoint structure. Even if we
5071 * change the data structure relationships, this may still
5072 * be useful for storing pre-connect address information.
5074 sp
->ep
= sctp_endpoint_new(sk
, GFP_KERNEL
);
5080 sk
->sk_destruct
= sctp_destruct_sock
;
5082 SCTP_DBG_OBJCNT_INC(sock
);
5085 sk_sockets_allocated_inc(sk
);
5086 sock_prot_inuse_add(net
, sk
->sk_prot
, 1);
5088 /* Nothing can fail after this block, otherwise
5089 * sctp_destroy_sock() will be called without addr_wq_lock held
5091 if (net
->sctp
.default_auto_asconf
) {
5092 spin_lock(&sock_net(sk
)->sctp
.addr_wq_lock
);
5093 list_add_tail(&sp
->auto_asconf_list
,
5094 &net
->sctp
.auto_asconf_splist
);
5095 sp
->do_auto_asconf
= 1;
5096 spin_unlock(&sock_net(sk
)->sctp
.addr_wq_lock
);
5098 sp
->do_auto_asconf
= 0;
5106 /* Cleanup any SCTP per socket resources. Must be called with
5107 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
5109 static void sctp_destroy_sock(struct sock
*sk
)
5111 struct sctp_sock
*sp
;
5113 pr_debug("%s: sk:%p\n", __func__
, sk
);
5115 /* Release our hold on the endpoint. */
5117 /* This could happen during socket init, thus we bail out
5118 * early, since the rest of the below is not setup either.
5123 if (sp
->do_auto_asconf
) {
5124 sp
->do_auto_asconf
= 0;
5125 list_del(&sp
->auto_asconf_list
);
5127 sctp_endpoint_free(sp
->ep
);
5129 sk_sockets_allocated_dec(sk
);
5130 sock_prot_inuse_add(sock_net(sk
), sk
->sk_prot
, -1);
5134 /* Triggered when there are no references on the socket anymore */
5135 static void sctp_destruct_sock(struct sock
*sk
)
5137 struct sctp_sock
*sp
= sctp_sk(sk
);
5139 /* Free up the HMAC transform. */
5140 crypto_free_shash(sp
->hmac
);
5142 inet_sock_destruct(sk
);
/* API 4.1.7 shutdown() - TCP Style Syntax
 *     int shutdown(int socket, int how);
 *
 *     sd      - the socket descriptor of the association to be closed.
 *     how     - Specifies the type of shutdown.  The values are as follows:
 *               SHUT_RD
 *                     Disables further receive operations.  No SCTP
 *                     protocol action is taken.
 *               SHUT_WR
 *                     Disables further send operations, and initiates
 *                     the SCTP shutdown sequence.
 *               SHUT_RDWR
 *                     Disables further send and receive operations
 *                     and initiates the SCTP shutdown sequence.
 */
static void sctp_shutdown(struct sock *sk, int how)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;

	if (!sctp_style(sk, TCP))
		return;

	ep = sctp_sk(sk)->ep;
	if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
		struct sctp_association *asoc;

		inet_sk_set_state(sk, SCTP_SS_CLOSING);
		asoc = list_entry(ep->asocs.next,
				  struct sctp_association, asocs);
		sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}
}
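/* Userspace sketch (not from this file): a graceful close of a one-to-one
 * association.  shutdown(SHUT_WR) triggers the SCTP SHUTDOWN sequence via the
 * handler above; close() then releases the descriptor.
 *
 *	shutdown(sd, SHUT_WR);
 *	// optionally drain remaining notifications/data with recvmsg()
 *	close(sd);
 */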
5180 int sctp_get_sctp_info(struct sock
*sk
, struct sctp_association
*asoc
,
5181 struct sctp_info
*info
)
5183 struct sctp_transport
*prim
;
5184 struct list_head
*pos
;
5187 memset(info
, 0, sizeof(*info
));
5189 struct sctp_sock
*sp
= sctp_sk(sk
);
5191 info
->sctpi_s_autoclose
= sp
->autoclose
;
5192 info
->sctpi_s_adaptation_ind
= sp
->adaptation_ind
;
5193 info
->sctpi_s_pd_point
= sp
->pd_point
;
5194 info
->sctpi_s_nodelay
= sp
->nodelay
;
5195 info
->sctpi_s_disable_fragments
= sp
->disable_fragments
;
5196 info
->sctpi_s_v4mapped
= sp
->v4mapped
;
5197 info
->sctpi_s_frag_interleave
= sp
->frag_interleave
;
5198 info
->sctpi_s_type
= sp
->type
;
5203 info
->sctpi_tag
= asoc
->c
.my_vtag
;
5204 info
->sctpi_state
= asoc
->state
;
5205 info
->sctpi_rwnd
= asoc
->a_rwnd
;
5206 info
->sctpi_unackdata
= asoc
->unack_data
;
5207 info
->sctpi_penddata
= sctp_tsnmap_pending(&asoc
->peer
.tsn_map
);
5208 info
->sctpi_instrms
= asoc
->stream
.incnt
;
5209 info
->sctpi_outstrms
= asoc
->stream
.outcnt
;
5210 list_for_each(pos
, &asoc
->base
.inqueue
.in_chunk_list
)
5211 info
->sctpi_inqueue
++;
5212 list_for_each(pos
, &asoc
->outqueue
.out_chunk_list
)
5213 info
->sctpi_outqueue
++;
5214 info
->sctpi_overall_error
= asoc
->overall_error_count
;
5215 info
->sctpi_max_burst
= asoc
->max_burst
;
5216 info
->sctpi_maxseg
= asoc
->frag_point
;
5217 info
->sctpi_peer_rwnd
= asoc
->peer
.rwnd
;
5218 info
->sctpi_peer_tag
= asoc
->c
.peer_vtag
;
5220 mask
= asoc
->peer
.ecn_capable
<< 1;
5221 mask
= (mask
| asoc
->peer
.ipv4_address
) << 1;
5222 mask
= (mask
| asoc
->peer
.ipv6_address
) << 1;
5223 mask
= (mask
| asoc
->peer
.hostname_address
) << 1;
5224 mask
= (mask
| asoc
->peer
.asconf_capable
) << 1;
5225 mask
= (mask
| asoc
->peer
.prsctp_capable
) << 1;
5226 mask
= (mask
| asoc
->peer
.auth_capable
);
5227 info
->sctpi_peer_capable
= mask
;
5228 mask
= asoc
->peer
.sack_needed
<< 1;
5229 mask
= (mask
| asoc
->peer
.sack_generation
) << 1;
5230 mask
= (mask
| asoc
->peer
.zero_window_announced
);
5231 info
->sctpi_peer_sack
= mask
;
5233 info
->sctpi_isacks
= asoc
->stats
.isacks
;
5234 info
->sctpi_osacks
= asoc
->stats
.osacks
;
5235 info
->sctpi_opackets
= asoc
->stats
.opackets
;
5236 info
->sctpi_ipackets
= asoc
->stats
.ipackets
;
5237 info
->sctpi_rtxchunks
= asoc
->stats
.rtxchunks
;
5238 info
->sctpi_outofseqtsns
= asoc
->stats
.outofseqtsns
;
5239 info
->sctpi_idupchunks
= asoc
->stats
.idupchunks
;
5240 info
->sctpi_gapcnt
= asoc
->stats
.gapcnt
;
5241 info
->sctpi_ouodchunks
= asoc
->stats
.ouodchunks
;
5242 info
->sctpi_iuodchunks
= asoc
->stats
.iuodchunks
;
5243 info
->sctpi_oodchunks
= asoc
->stats
.oodchunks
;
5244 info
->sctpi_iodchunks
= asoc
->stats
.iodchunks
;
5245 info
->sctpi_octrlchunks
= asoc
->stats
.octrlchunks
;
5246 info
->sctpi_ictrlchunks
= asoc
->stats
.ictrlchunks
;
5248 prim
= asoc
->peer
.primary_path
;
5249 memcpy(&info
->sctpi_p_address
, &prim
->ipaddr
, sizeof(prim
->ipaddr
));
5250 info
->sctpi_p_state
= prim
->state
;
5251 info
->sctpi_p_cwnd
= prim
->cwnd
;
5252 info
->sctpi_p_srtt
= prim
->srtt
;
5253 info
->sctpi_p_rto
= jiffies_to_msecs(prim
->rto
);
5254 info
->sctpi_p_hbinterval
= prim
->hbinterval
;
5255 info
->sctpi_p_pathmaxrxt
= prim
->pathmaxrxt
;
5256 info
->sctpi_p_sackdelay
= jiffies_to_msecs(prim
->sackdelay
);
5257 info
->sctpi_p_ssthresh
= prim
->ssthresh
;
5258 info
->sctpi_p_partial_bytes_acked
= prim
->partial_bytes_acked
;
5259 info
->sctpi_p_flight_size
= prim
->flight_size
;
5260 info
->sctpi_p_error
= prim
->error_count
;
5264 EXPORT_SYMBOL_GPL(sctp_get_sctp_info
);
5266 /* use callback to avoid exporting the core structure */
5267 void sctp_transport_walk_start(struct rhashtable_iter
*iter
)
5269 rhltable_walk_enter(&sctp_transport_hashtable
, iter
);
5271 rhashtable_walk_start(iter
);
5274 void sctp_transport_walk_stop(struct rhashtable_iter
*iter
)
5276 rhashtable_walk_stop(iter
);
5277 rhashtable_walk_exit(iter
);
5280 struct sctp_transport
*sctp_transport_get_next(struct net
*net
,
5281 struct rhashtable_iter
*iter
)
5283 struct sctp_transport
*t
;
5285 t
= rhashtable_walk_next(iter
);
5286 for (; t
; t
= rhashtable_walk_next(iter
)) {
5288 if (PTR_ERR(t
) == -EAGAIN
)
5293 if (!sctp_transport_hold(t
))
5296 if (net_eq(sock_net(t
->asoc
->base
.sk
), net
) &&
5297 t
->asoc
->peer
.primary_path
== t
)
5300 sctp_transport_put(t
);
5306 struct sctp_transport
*sctp_transport_get_idx(struct net
*net
,
5307 struct rhashtable_iter
*iter
,
5310 struct sctp_transport
*t
;
5313 return SEQ_START_TOKEN
;
5315 while ((t
= sctp_transport_get_next(net
, iter
)) && !IS_ERR(t
)) {
5318 sctp_transport_put(t
);
5324 int sctp_for_each_endpoint(int (*cb
)(struct sctp_endpoint
*, void *),
5328 struct sctp_ep_common
*epb
;
5329 struct sctp_hashbucket
*head
;
5331 for (head
= sctp_ep_hashtable
; hash
< sctp_ep_hashsize
;
5333 read_lock_bh(&head
->lock
);
5334 sctp_for_each_hentry(epb
, &head
->chain
) {
5335 err
= cb(sctp_ep(epb
), p
);
5339 read_unlock_bh(&head
->lock
);
5344 EXPORT_SYMBOL_GPL(sctp_for_each_endpoint
);
5346 int sctp_transport_lookup_process(int (*cb
)(struct sctp_transport
*, void *),
5348 const union sctp_addr
*laddr
,
5349 const union sctp_addr
*paddr
, void *p
)
5351 struct sctp_transport
*transport
;
5355 transport
= sctp_addrs_lookup_transport(net
, laddr
, paddr
);
5360 err
= cb(transport
, p
);
5361 sctp_transport_put(transport
);
5365 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process
);
5367 int sctp_for_each_transport(int (*cb
)(struct sctp_transport
*, void *),
5368 int (*cb_done
)(struct sctp_transport
*, void *),
5369 struct net
*net
, int *pos
, void *p
) {
5370 struct rhashtable_iter hti
;
5371 struct sctp_transport
*tsp
;
5376 sctp_transport_walk_start(&hti
);
5378 tsp
= sctp_transport_get_idx(net
, &hti
, *pos
+ 1);
5379 for (; !IS_ERR_OR_NULL(tsp
); tsp
= sctp_transport_get_next(net
, &hti
)) {
5384 sctp_transport_put(tsp
);
5386 sctp_transport_walk_stop(&hti
);
5389 if (cb_done
&& !cb_done(tsp
, p
)) {
5391 sctp_transport_put(tsp
);
5394 sctp_transport_put(tsp
);
5399 EXPORT_SYMBOL_GPL(sctp_for_each_transport
);
5401 /* 7.2.1 Association Status (SCTP_STATUS)
5403 * Applications can retrieve current status information about an
5404 * association, including association state, peer receiver window size,
5405 * number of unacked data chunks, and number of data chunks pending
5406 * receipt. This information is read-only.
5408 static int sctp_getsockopt_sctp_status(struct sock
*sk
, int len
,
5409 char __user
*optval
,
5412 struct sctp_status status
;
5413 struct sctp_association
*asoc
= NULL
;
5414 struct sctp_transport
*transport
;
5415 sctp_assoc_t associd
;
5418 if (len
< sizeof(status
)) {
5423 len
= sizeof(status
);
5424 if (copy_from_user(&status
, optval
, len
)) {
5429 associd
= status
.sstat_assoc_id
;
5430 asoc
= sctp_id2assoc(sk
, associd
);
5436 transport
= asoc
->peer
.primary_path
;
5438 status
.sstat_assoc_id
= sctp_assoc2id(asoc
);
5439 status
.sstat_state
= sctp_assoc_to_state(asoc
);
5440 status
.sstat_rwnd
= asoc
->peer
.rwnd
;
5441 status
.sstat_unackdata
= asoc
->unack_data
;
5443 status
.sstat_penddata
= sctp_tsnmap_pending(&asoc
->peer
.tsn_map
);
5444 status
.sstat_instrms
= asoc
->stream
.incnt
;
5445 status
.sstat_outstrms
= asoc
->stream
.outcnt
;
5446 status
.sstat_fragmentation_point
= asoc
->frag_point
;
5447 status
.sstat_primary
.spinfo_assoc_id
= sctp_assoc2id(transport
->asoc
);
5448 memcpy(&status
.sstat_primary
.spinfo_address
, &transport
->ipaddr
,
5449 transport
->af_specific
->sockaddr_len
);
5450 /* Map ipv4 address into v4-mapped-on-v6 address. */
5451 sctp_get_pf_specific(sk
->sk_family
)->addr_to_user(sctp_sk(sk
),
5452 (union sctp_addr
*)&status
.sstat_primary
.spinfo_address
);
5453 status
.sstat_primary
.spinfo_state
= transport
->state
;
5454 status
.sstat_primary
.spinfo_cwnd
= transport
->cwnd
;
5455 status
.sstat_primary
.spinfo_srtt
= transport
->srtt
;
5456 status
.sstat_primary
.spinfo_rto
= jiffies_to_msecs(transport
->rto
);
5457 status
.sstat_primary
.spinfo_mtu
= transport
->pathmtu
;
5459 if (status
.sstat_primary
.spinfo_state
== SCTP_UNKNOWN
)
5460 status
.sstat_primary
.spinfo_state
= SCTP_ACTIVE
;
5462 if (put_user(len
, optlen
)) {
5467 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
5468 __func__
, len
, status
.sstat_state
, status
.sstat_rwnd
,
5469 status
.sstat_assoc_id
);
5471 if (copy_to_user(optval
, &status
, len
)) {
5481 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
5483 * Applications can retrieve information about a specific peer address
5484 * of an association, including its reachability state, congestion
5485 * window, and retransmission timer values. This information is
5488 static int sctp_getsockopt_peer_addr_info(struct sock
*sk
, int len
,
5489 char __user
*optval
,
5492 struct sctp_paddrinfo pinfo
;
5493 struct sctp_transport
*transport
;
5496 if (len
< sizeof(pinfo
)) {
5501 len
= sizeof(pinfo
);
5502 if (copy_from_user(&pinfo
, optval
, len
)) {
5507 transport
= sctp_addr_id2transport(sk
, &pinfo
.spinfo_address
,
5508 pinfo
.spinfo_assoc_id
);
5512 pinfo
.spinfo_assoc_id
= sctp_assoc2id(transport
->asoc
);
5513 pinfo
.spinfo_state
= transport
->state
;
5514 pinfo
.spinfo_cwnd
= transport
->cwnd
;
5515 pinfo
.spinfo_srtt
= transport
->srtt
;
5516 pinfo
.spinfo_rto
= jiffies_to_msecs(transport
->rto
);
5517 pinfo
.spinfo_mtu
= transport
->pathmtu
;
5519 if (pinfo
.spinfo_state
== SCTP_UNKNOWN
)
5520 pinfo
.spinfo_state
= SCTP_ACTIVE
;
5522 if (put_user(len
, optlen
)) {
5527 if (copy_to_user(optval
, &pinfo
, len
)) {
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag.  If enabled no SCTP message
 * fragmentation will be performed.  Instead if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * instead an error will be indicated to the user.
 */
static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
					     char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->disable_fragments == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
 *
 * This socket option is used to specify various notifications and
 * ancillary data the user wishes to receive.
 */
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
				  int __user *optlen)
{
	struct sctp_event_subscribe subscribe;
	__u8 *sn_type = (__u8 *)&subscribe;
	int i;

	if (len == 0)
		return -EINVAL;
	if (len > sizeof(struct sctp_event_subscribe))
		len = sizeof(struct sctp_event_subscribe);
	if (put_user(len, optlen))
		return -EFAULT;

	for (i = 0; i < len; i++)
		sn_type[i] = sctp_ulpevent_type_enabled(sctp_sk(sk)->subscribe,
							SCTP_SN_TYPE_BASE + i);

	if (copy_to_user(optval, &subscribe, len))
		return -EFAULT;

	return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any association should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (len < sizeof(int))
		return -EINVAL;
	len = sizeof(int);
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(sctp_sk(sk)->autoclose, (int __user *)optval))
		return -EFAULT;
	return 0;
}
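/* Userspace sketch (not from this file): set and read back SCTP_AUTOCLOSE on
 * a one-to-many socket.  Assumes <netinet/sctp.h> and a socket 'sd'.
 *
 *	int secs = 30;			// auto-close idle associations after 30s
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 *
 *	socklen_t len = sizeof(secs);
 *	getsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, &len);
 */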
5615 /* Helper routine to branch off an association to a new socket. */
5616 int sctp_do_peeloff(struct sock
*sk
, sctp_assoc_t id
, struct socket
**sockp
)
5618 struct sctp_association
*asoc
= sctp_id2assoc(sk
, id
);
5619 struct sctp_sock
*sp
= sctp_sk(sk
);
5620 struct socket
*sock
;
5623 /* Do not peel off from one netns to another one. */
5624 if (!net_eq(current
->nsproxy
->net_ns
, sock_net(sk
)))
5630 /* An association cannot be branched off from an already peeled-off
5631 * socket, nor is this supported for tcp style sockets.
5633 if (!sctp_style(sk
, UDP
))
5636 /* Create a new socket. */
5637 err
= sock_create(sk
->sk_family
, SOCK_SEQPACKET
, IPPROTO_SCTP
, &sock
);
5641 sctp_copy_sock(sock
->sk
, sk
, asoc
);
5643 /* Make peeled-off sockets more like 1-1 accepted sockets.
5644 * Set the daddr and initialize id to something more random and also
5645 * copy over any ip options.
5647 sp
->pf
->to_sk_daddr(&asoc
->peer
.primary_addr
, sk
);
5648 sp
->pf
->copy_ip_options(sk
, sock
->sk
);
5650 /* Populate the fields of the newsk from the oldsk and migrate the
5651 * asoc to the newsk.
5653 err
= sctp_sock_migrate(sk
, sock
->sk
, asoc
,
5654 SCTP_SOCKET_UDP_HIGH_BANDWIDTH
);
5664 EXPORT_SYMBOL(sctp_do_peeloff
);
5666 static int sctp_getsockopt_peeloff_common(struct sock
*sk
, sctp_peeloff_arg_t
*peeloff
,
5667 struct file
**newfile
, unsigned flags
)
5669 struct socket
*newsock
;
5672 retval
= sctp_do_peeloff(sk
, peeloff
->associd
, &newsock
);
5676 /* Map the socket to an unused fd that can be returned to the user. */
5677 retval
= get_unused_fd_flags(flags
& SOCK_CLOEXEC
);
5679 sock_release(newsock
);
5683 *newfile
= sock_alloc_file(newsock
, 0, NULL
);
5684 if (IS_ERR(*newfile
)) {
5685 put_unused_fd(retval
);
5686 retval
= PTR_ERR(*newfile
);
5691 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__
, sk
, newsock
->sk
,
5694 peeloff
->sd
= retval
;
5696 if (flags
& SOCK_NONBLOCK
)
5697 (*newfile
)->f_flags
|= O_NONBLOCK
;
5702 static int sctp_getsockopt_peeloff(struct sock
*sk
, int len
, char __user
*optval
, int __user
*optlen
)
5704 sctp_peeloff_arg_t peeloff
;
5705 struct file
*newfile
= NULL
;
5708 if (len
< sizeof(sctp_peeloff_arg_t
))
5710 len
= sizeof(sctp_peeloff_arg_t
);
5711 if (copy_from_user(&peeloff
, optval
, len
))
5714 retval
= sctp_getsockopt_peeloff_common(sk
, &peeloff
, &newfile
, 0);
5718 /* Return the fd mapped to the new socket. */
5719 if (put_user(len
, optlen
)) {
5721 put_unused_fd(retval
);
5725 if (copy_to_user(optval
, &peeloff
, len
)) {
5727 put_unused_fd(retval
);
5730 fd_install(retval
, newfile
);
5735 static int sctp_getsockopt_peeloff_flags(struct sock
*sk
, int len
,
5736 char __user
*optval
, int __user
*optlen
)
5738 sctp_peeloff_flags_arg_t peeloff
;
5739 struct file
*newfile
= NULL
;
5742 if (len
< sizeof(sctp_peeloff_flags_arg_t
))
5744 len
= sizeof(sctp_peeloff_flags_arg_t
);
5745 if (copy_from_user(&peeloff
, optval
, len
))
5748 retval
= sctp_getsockopt_peeloff_common(sk
, &peeloff
.p_arg
,
5749 &newfile
, peeloff
.flags
);
5753 /* Return the fd mapped to the new socket. */
5754 if (put_user(len
, optlen
)) {
5756 put_unused_fd(retval
);
5760 if (copy_to_user(optval
, &peeloff
, len
)) {
5762 put_unused_fd(retval
);
5765 fd_install(retval
, newfile
);
5770 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
5772 * Applications can enable or disable heartbeats for any peer address of
5773 * an association, modify an address's heartbeat interval, force a
5774 * heartbeat to be sent immediately, and adjust the address's maximum
5775 * number of retransmissions sent before an address is considered
5776 * unreachable. The following structure is used to access and modify an
5777 * address's parameters:
5779 * struct sctp_paddrparams {
5780 * sctp_assoc_t spp_assoc_id;
5781 * struct sockaddr_storage spp_address;
5782 * uint32_t spp_hbinterval;
5783 * uint16_t spp_pathmaxrxt;
5784 * uint32_t spp_pathmtu;
5785 * uint32_t spp_sackdelay;
5786 * uint32_t spp_flags;
5789 * spp_assoc_id - (one-to-many style socket) This is filled in the
5790 * application, and identifies the association for
5792 * spp_address - This specifies which address is of interest.
5793 * spp_hbinterval - This contains the value of the heartbeat interval,
5794 * in milliseconds. If a value of zero
5795 * is present in this field then no changes are to
5796 * be made to this parameter.
5797 * spp_pathmaxrxt - This contains the maximum number of
5798 * retransmissions before this address shall be
5799 * considered unreachable. If a value of zero
5800 * is present in this field then no changes are to
5801 * be made to this parameter.
5802 * spp_pathmtu - When Path MTU discovery is disabled the value
5803 * specified here will be the "fixed" path mtu.
5804 * Note that if the spp_address field is empty
5805 * then all associations on this address will
5806 * have this fixed path mtu set upon them.
5808 * spp_sackdelay - When delayed sack is enabled, this value specifies
5809 * the number of milliseconds that sacks will be delayed
5810 * for. This value will apply to all addresses of an
5811 * association if the spp_address field is empty. Note
5812 * also, that if delayed sack is enabled and this
5813 * value is set to 0, no change is made to the last
5814 * recorded delayed sack timer value.
5816 * spp_flags - These flags are used to control various features
5817 * on an association. The flag field may contain
5818 * zero or more of the following options.
5820 * SPP_HB_ENABLE - Enable heartbeats on the
5821 * specified address. Note that if the address
5822 * field is empty all addresses for the association
5823 * have heartbeats enabled upon them.
5825 * SPP_HB_DISABLE - Disable heartbeats on the
5826 * speicifed address. Note that if the address
5827 * field is empty all addresses for the association
5828 * will have their heartbeats disabled. Note also
5829 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
5830 * mutually exclusive, only one of these two should
5831 * be specified. Enabling both fields will have
5832 * undetermined results.
5834 * SPP_HB_DEMAND - Request a user initiated heartbeat
5835 * to be made immediately.
5837 * SPP_PMTUD_ENABLE - This field will enable PMTU
5838 * discovery upon the specified address. Note that
5839 * if the address feild is empty then all addresses
5840 * on the association are effected.
5842 * SPP_PMTUD_DISABLE - This field will disable PMTU
5843 * discovery upon the specified address. Note that
5844 * if the address feild is empty then all addresses
5845 * on the association are effected. Not also that
5846 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
5847 * exclusive. Enabling both will have undetermined
5850 * SPP_SACKDELAY_ENABLE - Setting this flag turns
5851 * on delayed sack. The time specified in spp_sackdelay
5852 * is used to specify the sack delay for this address. Note
5853 * that if spp_address is empty then all addresses will
5854 * enable delayed sack and take on the sack delay
5855 * value specified in spp_sackdelay.
5856 * SPP_SACKDELAY_DISABLE - Setting this flag turns
5857 * off delayed sack. If the spp_address field is blank then
5858 * delayed sack is disabled for the entire association. Note
5859 * also that this field is mutually exclusive to
5860 * SPP_SACKDELAY_ENABLE, setting both will have undefined
5863 * SPP_IPV6_FLOWLABEL: Setting this flag enables the
5864 * setting of the IPV6 flow label value. The value is
5865 * contained in the spp_ipv6_flowlabel field.
5866 * Upon retrieval, this flag will be set to indicate that
5867 * the spp_ipv6_flowlabel field has a valid value returned.
5868 * If a specific destination address is set (in the
5869 * spp_address field), then the value returned is that of
5870 * the address. If just an association is specified (and
5871 * no address), then the association's default flow label
5872 * is returned. If neither an association nor a destination
5873 * is specified, then the socket's default flow label is
5874 * returned. For non-IPv6 sockets, this flag will be left
5877 * SPP_DSCP: Setting this flag enables the setting of the
5878 * Differentiated Services Code Point (DSCP) value
5879 * associated with either the association or a specific
5880 * address. The value is obtained in the spp_dscp field.
5881 * Upon retrieval, this flag will be set to indicate that
5882 * the spp_dscp field has a valid value returned. If a
5883 * specific destination address is set when called (in the
5884 * spp_address field), then that specific destination
5885 * address's DSCP value is returned. If just an association
5886 * is specified, then the association's default DSCP is
5887 * returned. If neither an association nor a destination is
5888 * specified, then the socket's default DSCP is returned.
5890 * spp_ipv6_flowlabel
5891 * - This field is used in conjunction with the
5892 * SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
5893 * The 20 least significant bits are used for the flow
5894 * label. This setting has precedence over any IPv6-layer
5897 * spp_dscp - This field is used in conjunction with the SPP_DSCP flag
5898 * and contains the DSCP. The 6 most significant bits are
5899 * used for the DSCP. This setting has precedence over any
5900 * IPv4- or IPv6- layer setting.
5902 static int sctp_getsockopt_peer_addr_params(struct sock
*sk
, int len
,
5903 char __user
*optval
, int __user
*optlen
)
5905 struct sctp_paddrparams params
;
5906 struct sctp_transport
*trans
= NULL
;
5907 struct sctp_association
*asoc
= NULL
;
5908 struct sctp_sock
*sp
= sctp_sk(sk
);
5910 if (len
>= sizeof(params
))
5911 len
= sizeof(params
);
5912 else if (len
>= ALIGN(offsetof(struct sctp_paddrparams
,
5913 spp_ipv6_flowlabel
), 4))
5914 len
= ALIGN(offsetof(struct sctp_paddrparams
,
5915 spp_ipv6_flowlabel
), 4);
5919 if (copy_from_user(¶ms
, optval
, len
))
5922 /* If an address other than INADDR_ANY is specified, and
5923 * no transport is found, then the request is invalid.
5925 if (!sctp_is_any(sk
, (union sctp_addr
*)¶ms
.spp_address
)) {
5926 trans
= sctp_addr_id2transport(sk
, ¶ms
.spp_address
,
5927 params
.spp_assoc_id
);
5929 pr_debug("%s: failed no transport\n", __func__
);
5934 /* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
5935 * socket is a one to many style socket, and an association
5936 * was not found, then the id was invalid.
5938 asoc
= sctp_id2assoc(sk
, params
.spp_assoc_id
);
5939 if (!asoc
&& params
.spp_assoc_id
!= SCTP_FUTURE_ASSOC
&&
5940 sctp_style(sk
, UDP
)) {
5941 pr_debug("%s: failed no association\n", __func__
);
5946 /* Fetch transport values. */
5947 params
.spp_hbinterval
= jiffies_to_msecs(trans
->hbinterval
);
5948 params
.spp_pathmtu
= trans
->pathmtu
;
5949 params
.spp_pathmaxrxt
= trans
->pathmaxrxt
;
5950 params
.spp_sackdelay
= jiffies_to_msecs(trans
->sackdelay
);
5952 /*draft-11 doesn't say what to return in spp_flags*/
5953 params
.spp_flags
= trans
->param_flags
;
5954 if (trans
->flowlabel
& SCTP_FLOWLABEL_SET_MASK
) {
5955 params
.spp_ipv6_flowlabel
= trans
->flowlabel
&
5956 SCTP_FLOWLABEL_VAL_MASK
;
5957 params
.spp_flags
|= SPP_IPV6_FLOWLABEL
;
5959 if (trans
->dscp
& SCTP_DSCP_SET_MASK
) {
5960 params
.spp_dscp
= trans
->dscp
& SCTP_DSCP_VAL_MASK
;
5961 params
.spp_flags
|= SPP_DSCP
;
5964 /* Fetch association values. */
5965 params
.spp_hbinterval
= jiffies_to_msecs(asoc
->hbinterval
);
5966 params
.spp_pathmtu
= asoc
->pathmtu
;
5967 params
.spp_pathmaxrxt
= asoc
->pathmaxrxt
;
5968 params
.spp_sackdelay
= jiffies_to_msecs(asoc
->sackdelay
);
5970 /*draft-11 doesn't say what to return in spp_flags*/
5971 params
.spp_flags
= asoc
->param_flags
;
5972 if (asoc
->flowlabel
& SCTP_FLOWLABEL_SET_MASK
) {
5973 params
.spp_ipv6_flowlabel
= asoc
->flowlabel
&
5974 SCTP_FLOWLABEL_VAL_MASK
;
5975 params
.spp_flags
|= SPP_IPV6_FLOWLABEL
;
5977 if (asoc
->dscp
& SCTP_DSCP_SET_MASK
) {
5978 params
.spp_dscp
= asoc
->dscp
& SCTP_DSCP_VAL_MASK
;
5979 params
.spp_flags
|= SPP_DSCP
;
5982 /* Fetch socket values. */
5983 params
.spp_hbinterval
= sp
->hbinterval
;
5984 params
.spp_pathmtu
= sp
->pathmtu
;
5985 params
.spp_sackdelay
= sp
->sackdelay
;
5986 params
.spp_pathmaxrxt
= sp
->pathmaxrxt
;
5988 /*draft-11 doesn't say what to return in spp_flags*/
5989 params
.spp_flags
= sp
->param_flags
;
5990 if (sp
->flowlabel
& SCTP_FLOWLABEL_SET_MASK
) {
5991 params
.spp_ipv6_flowlabel
= sp
->flowlabel
&
5992 SCTP_FLOWLABEL_VAL_MASK
;
5993 params
.spp_flags
|= SPP_IPV6_FLOWLABEL
;
5995 if (sp
->dscp
& SCTP_DSCP_SET_MASK
) {
5996 params
.spp_dscp
= sp
->dscp
& SCTP_DSCP_VAL_MASK
;
5997 params
.spp_flags
|= SPP_DSCP
;
6001 if (copy_to_user(optval
, ¶ms
, len
))
6004 if (put_user(len
, optlen
))
6011 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
6013 * This option will effect the way delayed acks are performed. This
6014 * option allows you to get or set the delayed ack time, in
6015 * milliseconds. It also allows changing the delayed ack frequency.
6016 * Changing the frequency to 1 disables the delayed sack algorithm. If
6017 * the assoc_id is 0, then this sets or gets the endpoints default
6018 * values. If the assoc_id field is non-zero, then the set or get
6019 * effects the specified association for the one to many model (the
6020 * assoc_id field is ignored by the one to one model). Note that if
6021 * sack_delay or sack_freq are 0 when setting this option, then the
6022 * current values will remain unchanged.
6024 * struct sctp_sack_info {
6025 * sctp_assoc_t sack_assoc_id;
6026 * uint32_t sack_delay;
6027 * uint32_t sack_freq;
6030 * sack_assoc_id - This parameter, indicates which association the user
6031 * is performing an action upon. Note that if this field's value is
6032 * zero then the endpoints default value is changed (effecting future
6033 * associations only).
6035 * sack_delay - This parameter contains the number of milliseconds that
6036 * the user is requesting the delayed ACK timer be set to. Note that
6037 * this value is defined in the standard to be between 200 and 500
6040 * sack_freq - This parameter contains the number of packets that must
6041 * be received before a sack is sent without waiting for the delay
6042 * timer to expire. The default value for this is 2, setting this
6043 * value to 1 will disable the delayed sack algorithm.
6045 static int sctp_getsockopt_delayed_ack(struct sock
*sk
, int len
,
6046 char __user
*optval
,
6049 struct sctp_sack_info params
;
6050 struct sctp_association
*asoc
= NULL
;
6051 struct sctp_sock
*sp
= sctp_sk(sk
);
6053 if (len
>= sizeof(struct sctp_sack_info
)) {
6054 len
= sizeof(struct sctp_sack_info
);
6056 if (copy_from_user(¶ms
, optval
, len
))
6058 } else if (len
== sizeof(struct sctp_assoc_value
)) {
6059 pr_warn_ratelimited(DEPRECATED
6061 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
6062 "Use struct sctp_sack_info instead\n",
6063 current
->comm
, task_pid_nr(current
));
6064 if (copy_from_user(¶ms
, optval
, len
))
6069 /* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
6070 * socket is a one to many style socket, and an association
6071 * was not found, then the id was invalid.
6073 asoc
= sctp_id2assoc(sk
, params
.sack_assoc_id
);
6074 if (!asoc
&& params
.sack_assoc_id
!= SCTP_FUTURE_ASSOC
&&
6075 sctp_style(sk
, UDP
))
6079 /* Fetch association values. */
6080 if (asoc
->param_flags
& SPP_SACKDELAY_ENABLE
) {
6081 params
.sack_delay
= jiffies_to_msecs(asoc
->sackdelay
);
6082 params
.sack_freq
= asoc
->sackfreq
;
6085 params
.sack_delay
= 0;
6086 params
.sack_freq
= 1;
6089 /* Fetch socket values. */
6090 if (sp
->param_flags
& SPP_SACKDELAY_ENABLE
) {
6091 params
.sack_delay
= sp
->sackdelay
;
6092 params
.sack_freq
= sp
->sackfreq
;
6094 params
.sack_delay
= 0;
6095 params
.sack_freq
= 1;
6099 if (copy_to_user(optval
, ¶ms
, len
))
6102 if (put_user(len
, optlen
))
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets, only future associations are affected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	if (len < sizeof(struct sctp_initmsg))
		return -EINVAL;
	len = sizeof(struct sctp_initmsg);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
		return -EFAULT;
	return 0;
}
6132 static int sctp_getsockopt_peer_addrs(struct sock
*sk
, int len
,
6133 char __user
*optval
, int __user
*optlen
)
6135 struct sctp_association
*asoc
;
6137 struct sctp_getaddrs getaddrs
;
6138 struct sctp_transport
*from
;
6140 union sctp_addr temp
;
6141 struct sctp_sock
*sp
= sctp_sk(sk
);
6146 if (len
< sizeof(struct sctp_getaddrs
))
6149 if (copy_from_user(&getaddrs
, optval
, sizeof(struct sctp_getaddrs
)))
6152 /* For UDP-style sockets, id specifies the association to query. */
6153 asoc
= sctp_id2assoc(sk
, getaddrs
.assoc_id
);
6157 to
= optval
+ offsetof(struct sctp_getaddrs
, addrs
);
6158 space_left
= len
- offsetof(struct sctp_getaddrs
, addrs
);
6160 list_for_each_entry(from
, &asoc
->peer
.transport_addr_list
,
6162 memcpy(&temp
, &from
->ipaddr
, sizeof(temp
));
6163 addrlen
= sctp_get_pf_specific(sk
->sk_family
)
6164 ->addr_to_user(sp
, &temp
);
6165 if (space_left
< addrlen
)
6167 if (copy_to_user(to
, &temp
, addrlen
))
6171 space_left
-= addrlen
;
6174 if (put_user(cnt
, &((struct sctp_getaddrs __user
*)optval
)->addr_num
))
6176 bytes_copied
= ((char __user
*)to
) - optval
;
6177 if (put_user(bytes_copied
, optlen
))
6183 static int sctp_copy_laddrs(struct sock
*sk
, __u16 port
, void *to
,
6184 size_t space_left
, int *bytes_copied
)
6186 struct sctp_sockaddr_entry
*addr
;
6187 union sctp_addr temp
;
6190 struct net
*net
= sock_net(sk
);
6193 list_for_each_entry_rcu(addr
, &net
->sctp
.local_addr_list
, list
) {
6197 if ((PF_INET
== sk
->sk_family
) &&
6198 (AF_INET6
== addr
->a
.sa
.sa_family
))
6200 if ((PF_INET6
== sk
->sk_family
) &&
6201 inet_v6_ipv6only(sk
) &&
6202 (AF_INET
== addr
->a
.sa
.sa_family
))
6204 memcpy(&temp
, &addr
->a
, sizeof(temp
));
6205 if (!temp
.v4
.sin_port
)
6206 temp
.v4
.sin_port
= htons(port
);
6208 addrlen
= sctp_get_pf_specific(sk
->sk_family
)
6209 ->addr_to_user(sctp_sk(sk
), &temp
);
6211 if (space_left
< addrlen
) {
6215 memcpy(to
, &temp
, addrlen
);
6219 space_left
-= addrlen
;
6220 *bytes_copied
+= addrlen
;
6228 static int sctp_getsockopt_local_addrs(struct sock
*sk
, int len
,
6229 char __user
*optval
, int __user
*optlen
)
6231 struct sctp_bind_addr
*bp
;
6232 struct sctp_association
*asoc
;
6234 struct sctp_getaddrs getaddrs
;
6235 struct sctp_sockaddr_entry
*addr
;
6237 union sctp_addr temp
;
6238 struct sctp_sock
*sp
= sctp_sk(sk
);
6242 int bytes_copied
= 0;
6246 if (len
< sizeof(struct sctp_getaddrs
))
6249 if (copy_from_user(&getaddrs
, optval
, sizeof(struct sctp_getaddrs
)))
6253 * For UDP-style sockets, id specifies the association to query.
6254 * If the id field is set to the value '0' then the locally bound
6255 * addresses are returned without regard to any particular
6258 if (0 == getaddrs
.assoc_id
) {
6259 bp
= &sctp_sk(sk
)->ep
->base
.bind_addr
;
6261 asoc
= sctp_id2assoc(sk
, getaddrs
.assoc_id
);
6264 bp
= &asoc
->base
.bind_addr
;
6267 to
= optval
+ offsetof(struct sctp_getaddrs
, addrs
);
6268 space_left
= len
- offsetof(struct sctp_getaddrs
, addrs
);
6270 addrs
= kmalloc(space_left
, GFP_USER
| __GFP_NOWARN
);
6274 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
6275 * addresses from the global local address list.
6277 if (sctp_list_single_entry(&bp
->address_list
)) {
6278 addr
= list_entry(bp
->address_list
.next
,
6279 struct sctp_sockaddr_entry
, list
);
6280 if (sctp_is_any(sk
, &addr
->a
)) {
6281 cnt
= sctp_copy_laddrs(sk
, bp
->port
, addrs
,
6282 space_left
, &bytes_copied
);
6292 /* Protection on the bound address list is not needed since
6293 * in the socket option context we hold a socket lock and
6294 * thus the bound address list can't change.
6296 list_for_each_entry(addr
, &bp
->address_list
, list
) {
6297 memcpy(&temp
, &addr
->a
, sizeof(temp
));
6298 addrlen
= sctp_get_pf_specific(sk
->sk_family
)
6299 ->addr_to_user(sp
, &temp
);
6300 if (space_left
< addrlen
) {
6301 err
= -ENOMEM
; /*fixme: right error?*/
6304 memcpy(buf
, &temp
, addrlen
);
6306 bytes_copied
+= addrlen
;
6308 space_left
-= addrlen
;
6312 if (copy_to_user(to
, addrs
, bytes_copied
)) {
6316 if (put_user(cnt
, &((struct sctp_getaddrs __user
*)optval
)->addr_num
)) {
6320 /* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
6321 * but we can't change it anymore.
6323 if (put_user(bytes_copied
, optlen
))
6330 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
6332 * Requests that the local SCTP stack use the enclosed peer address as
6333 * the association primary. The enclosed address must be one of the
6334 * association peer's addresses.
6336 static int sctp_getsockopt_primary_addr(struct sock
*sk
, int len
,
6337 char __user
*optval
, int __user
*optlen
)
6339 struct sctp_prim prim
;
6340 struct sctp_association
*asoc
;
6341 struct sctp_sock
*sp
= sctp_sk(sk
);
6343 if (len
< sizeof(struct sctp_prim
))
6346 len
= sizeof(struct sctp_prim
);
6348 if (copy_from_user(&prim
, optval
, len
))
6351 asoc
= sctp_id2assoc(sk
, prim
.ssp_assoc_id
);
6355 if (!asoc
->peer
.primary_path
)
6358 memcpy(&prim
.ssp_addr
, &asoc
->peer
.primary_path
->ipaddr
,
6359 asoc
->peer
.primary_path
->af_specific
->sockaddr_len
);
6361 sctp_get_pf_specific(sk
->sk_family
)->addr_to_user(sp
,
6362 (union sctp_addr
*)&prim
.ssp_addr
);
6364 if (put_user(len
, optlen
))
6366 if (copy_to_user(optval
, &prim
, len
))
6373 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
6375 * Requests that the local endpoint set the specified Adaptation Layer
6376 * Indication parameter for all future INIT and INIT-ACK exchanges.
6378 static int sctp_getsockopt_adaptation_layer(struct sock
*sk
, int len
,
6379 char __user
*optval
, int __user
*optlen
)
6381 struct sctp_setadaptation adaptation
;
6383 if (len
< sizeof(struct sctp_setadaptation
))
6386 len
= sizeof(struct sctp_setadaptation
);
6388 adaptation
.ssb_adaptation_ind
= sctp_sk(sk
)->adaptation_ind
;
6390 if (put_user(len
, optlen
))
6392 if (copy_to_user(optval
, &adaptation
, len
))
/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 *   Applications that wish to use the sendto() system call may wish to
 *   specify a default set of parameters that would normally be supplied
 *   through the inclusion of ancillary data.  This socket option allows
 *   such an application to set the default sctp_sndrcvinfo structure.
 *   The application that wishes to use this socket option simply passes
 *   in to this call the sctp_sndrcvinfo structure defined in Section
 *   5.2.2)  The input parameters accepted by this call include
 *   sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 *   sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
 *   to this call if the caller is using the UDP model.
 *
 *   For getsockopt, it gets the default sctp_sndrcvinfo structure.
 */
static int sctp_getsockopt_default_send_param(struct sock *sk,
					      int len, char __user *optval,
					      int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndrcvinfo info;

	if (len < sizeof(info))
		return -EINVAL;

	len = sizeof(info);

	if (copy_from_user(&info, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		info.sinfo_stream = asoc->default_stream;
		info.sinfo_flags = asoc->default_flags;
		info.sinfo_ppid = asoc->default_ppid;
		info.sinfo_context = asoc->default_context;
		info.sinfo_timetolive = asoc->default_timetolive;
	} else {
		info.sinfo_stream = sp->default_stream;
		info.sinfo_flags = sp->default_flags;
		info.sinfo_ppid = sp->default_ppid;
		info.sinfo_context = sp->default_context;
		info.sinfo_timetolive = sp->default_timetolive;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &info, len))
		return -EFAULT;

	return 0;
}
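/* Userspace illustration (not kernel code): a sketch of the read-modify-write
 * pattern this option supports.  The current defaults are fetched, the
 * default stream and PPID are changed, and the structure is written back;
 * 'fd' and the concrete values are assumptions of the example.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	static int set_default_stream(int fd, uint16_t stream, uint32_t ppid)
 *	{
 *		struct sctp_sndrcvinfo info;
 *		socklen_t optlen = sizeof(info);
 *
 *		memset(&info, 0, sizeof(info));
 *		if (getsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *			       &info, &optlen) < 0)
 *			return -1;
 *		info.sinfo_stream = stream;
 *		info.sinfo_ppid = htonl(ppid);
 *		return setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *				  &info, sizeof(info));
 *	}
 */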
/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
 * (SCTP_DEFAULT_SNDINFO)
 */
static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndinfo info;

	if (len < sizeof(info))
		return -EINVAL;

	len = sizeof(info);

	if (copy_from_user(&info, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, info.snd_assoc_id);
	if (!asoc && info.snd_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		info.snd_sid = asoc->default_stream;
		info.snd_flags = asoc->default_flags;
		info.snd_ppid = asoc->default_ppid;
		info.snd_context = asoc->default_context;
	} else {
		info.snd_sid = sp->default_stream;
		info.snd_flags = sp->default_flags;
		info.snd_ppid = sp->default_ppid;
		info.snd_context = sp->default_context;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &info, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.5 SCTP_NODELAY
 *
 * Turn on/off any Nagle-like algorithm.  This means that packets are
 * generally sent as soon as possible and no unnecessary delays are
 * introduced, at the cost of more packets in the network.  Expects an
 * integer boolean flag.
 */
static int sctp_getsockopt_nodelay(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->nodelay == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
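/* Userspace illustration (not kernel code): disabling the Nagle-like delay is
 * the usual companion to small request/response messages.  A sketch, with
 * 'fd' assumed to be an SCTP socket:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
 *
 *	int val = 0;
 *	socklen_t optlen = sizeof(val);
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &val, &optlen);
 */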
/*
 * 7.1.1 SCTP_RTOINFO
 *
 * The protocol parameters used to initialize and bound retransmission
 * timeout (RTO) are tunable.  sctp_rtoinfo structure is used to access
 * and modify these parameters.
 * All parameters are time values, in milliseconds.  A value of 0, when
 * modifying the parameters, indicates that the current value should not
 * be changed.
 */
static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
				   char __user *optval,
				   int __user *optlen)
{
	struct sctp_rtoinfo rtoinfo;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_rtoinfo))
		return -EINVAL;

	len = sizeof(struct sctp_rtoinfo);

	if (copy_from_user(&rtoinfo, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);

	if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	/* Values corresponding to the specific association. */
	if (asoc) {
		rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
		rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
		rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
	} else {
		/* Values corresponding to the endpoint. */
		struct sctp_sock *sp = sctp_sk(sk);

		rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
		rtoinfo.srto_max = sp->rtoinfo.srto_max;
		rtoinfo.srto_min = sp->rtoinfo.srto_min;
	}

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &rtoinfo, len))
		return -EFAULT;

	return 0;
}
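/* Userspace illustration (not kernel code): the values travel in
 * milliseconds, and a zero field on the set path means "leave unchanged".
 * A sketch that lowers RTO.min for a latency-sensitive association; 'fd',
 * 'assoc_id' and the value are assumptions of the example.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	static int lower_rto_min(int fd, sctp_assoc_t assoc_id, uint32_t min_ms)
 *	{
 *		struct sctp_rtoinfo rto;
 *
 *		memset(&rto, 0, sizeof(rto));	 (zero fields are not changed)
 *		rto.srto_assoc_id = assoc_id;
 *		rto.srto_min = min_ms;
 *		return setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO,
 *				  &rto, sizeof(rto));
 *	}
 */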
/*
 * 7.1.2 SCTP_ASSOCINFO
 *
 * This option is used to tune the maximum retransmission attempts
 * of the association.
 * Returns an error if the new association retransmission value is
 * greater than the sum of the retransmission value of the peer.
 * See [SCTP] for more information.
 */
static int sctp_getsockopt_associnfo(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_assocparams assocparams;
	struct sctp_association *asoc;
	struct list_head *pos;
	int cnt = 0;

	if (len < sizeof(struct sctp_assocparams))
		return -EINVAL;

	len = sizeof(struct sctp_assocparams);

	if (copy_from_user(&assocparams, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);

	if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	/* Values corresponding to the specific association */
	if (asoc) {
		assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
		assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
		assocparams.sasoc_local_rwnd = asoc->a_rwnd;
		assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);

		list_for_each(pos, &asoc->peer.transport_addr_list) {
			cnt++;
		}

		assocparams.sasoc_number_peer_destinations = cnt;
	} else {
		/* Values corresponding to the endpoint */
		struct sctp_sock *sp = sctp_sk(sk);

		assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
		assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
		assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
		assocparams.sasoc_cookie_life =
			sp->assocparams.sasoc_cookie_life;
		assocparams.sasoc_number_peer_destinations =
			sp->assocparams.sasoc_number_peer_destinations;
	}

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &assocparams, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
 *
 * This socket option is a boolean flag which turns on or off mapped V4
 * addresses.  If this option is turned on and the socket is type
 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
 * If this option is turned off, then no mapping will be done of V4
 * addresses and a user will receive both PF_INET6 and PF_INET type
 * addresses on the socket.
 */
static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = sp->v4mapped;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.29.  Set or Get the default context (SCTP_CONTEXT)
 * (chapter and verse is quoted at sctp_setsockopt_context())
 */
static int sctp_getsockopt_context(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_assoc_value))
		return -EINVAL;

	len = sizeof(struct sctp_assoc_value);

	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	params.assoc_value = asoc ? asoc->default_rcv_context
				  : sctp_sk(sk)->default_rcv_context;

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	return 0;
}
/*
 * 8.1.16.  Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
 * This option will get or set the maximum size to put in any outgoing
 * SCTP DATA chunk.  If a message is larger than this size it will be
 * fragmented by SCTP into the specified size.  Note that the underlying
 * SCTP implementation may fragment into smaller sized chunks when the
 * PMTU of the underlying association is smaller than the value set by
 * the user.  The default value for this option is '0' which indicates
 * the user is NOT limiting fragmentation and only the PMTU will effect
 * SCTP's choice of DATA chunk size.  Note also that values set larger
 * than the maximum size of an IP datagram will effectively let SCTP
 * control fragmentation (i.e. the same as setting this option to 0).
 *
 * The following structure is used to access and modify this parameter:
 *
 * struct sctp_assoc_value {
 *   sctp_assoc_t assoc_id;
 *   uint32_t assoc_value;
 * };
 *
 * assoc_id:  This parameter is ignored for one-to-one style sockets.
 *    For one-to-many style sockets this parameter indicates which
 *    association the user is performing an action upon.  Note that if
 *    this field's value is zero then the endpoints default value is
 *    changed (effecting future associations only).
 * assoc_value:  This parameter specifies the maximum size in bytes.
 */
6746 static int sctp_getsockopt_maxseg(struct sock
*sk
, int len
,
6747 char __user
*optval
, int __user
*optlen
)
6749 struct sctp_assoc_value params
;
6750 struct sctp_association
*asoc
;
6752 if (len
== sizeof(int)) {
6753 pr_warn_ratelimited(DEPRECATED
6755 "Use of int in maxseg socket option.\n"
6756 "Use struct sctp_assoc_value instead\n",
6757 current
->comm
, task_pid_nr(current
));
6758 params
.assoc_id
= SCTP_FUTURE_ASSOC
;
6759 } else if (len
>= sizeof(struct sctp_assoc_value
)) {
6760 len
= sizeof(struct sctp_assoc_value
);
6761 if (copy_from_user(¶ms
, optval
, len
))
6766 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
6767 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
6768 sctp_style(sk
, UDP
))
6772 params
.assoc_value
= asoc
->frag_point
;
6774 params
.assoc_value
= sctp_sk(sk
)->user_frag
;
6776 if (put_user(len
, optlen
))
6778 if (len
== sizeof(int)) {
6779 if (copy_to_user(optval
, ¶ms
.assoc_value
, len
))
6782 if (copy_to_user(optval
, ¶ms
, len
))
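/* Userspace illustration (not kernel code): the struct sctp_assoc_value form
 * is preferred over the legacy plain-int form handled above.  A sketch that
 * caps outgoing DATA chunk payloads; 'fd' is an assumed socket, and assoc_id
 * 0 targets the endpoint default on a one-to-many socket.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1200 };
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 *
 *	socklen_t optlen = sizeof(av);
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &optlen);
 */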
/*
 * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
 */
static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
					       char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);

	val = sctp_sk(sk)->frag_interleave;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.25.  Set or Get the sctp partial delivery point
 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
 */
static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
						  char __user *optval,
						  int __user *optlen)
{
	u32 val;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	val = sctp_sk(sk)->pd_point;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
6837 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
6838 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
6840 static int sctp_getsockopt_maxburst(struct sock
*sk
, int len
,
6841 char __user
*optval
,
6844 struct sctp_assoc_value params
;
6845 struct sctp_association
*asoc
;
6847 if (len
== sizeof(int)) {
6848 pr_warn_ratelimited(DEPRECATED
6850 "Use of int in max_burst socket option.\n"
6851 "Use struct sctp_assoc_value instead\n",
6852 current
->comm
, task_pid_nr(current
));
6853 params
.assoc_id
= SCTP_FUTURE_ASSOC
;
6854 } else if (len
>= sizeof(struct sctp_assoc_value
)) {
6855 len
= sizeof(struct sctp_assoc_value
);
6856 if (copy_from_user(¶ms
, optval
, len
))
6861 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
6862 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
6863 sctp_style(sk
, UDP
))
6866 params
.assoc_value
= asoc
? asoc
->max_burst
: sctp_sk(sk
)->max_burst
;
6868 if (len
== sizeof(int)) {
6869 if (copy_to_user(optval
, ¶ms
.assoc_value
, len
))
6872 if (copy_to_user(optval
, ¶ms
, len
))
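/* Userspace illustration (not kernel code): as with SCTP_MAXSEG, the
 * struct sctp_assoc_value form should be used; the plain-int form is kept
 * only for backward compatibility (note the deprecation warning above).
 * A sketch reading the current max burst; 'fd' is an assumed socket.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_assoc_value av = { .assoc_id = 0 };
 *	socklen_t optlen = sizeof(av);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST, &av, &optlen) == 0)
 *		printf("max burst: %u packets\n", av.assoc_value);
 */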
6880 static int sctp_getsockopt_hmac_ident(struct sock
*sk
, int len
,
6881 char __user
*optval
, int __user
*optlen
)
6883 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
6884 struct sctp_hmacalgo __user
*p
= (void __user
*)optval
;
6885 struct sctp_hmac_algo_param
*hmacs
;
6890 if (!ep
->auth_enable
)
6893 hmacs
= ep
->auth_hmacs_list
;
6894 data_len
= ntohs(hmacs
->param_hdr
.length
) -
6895 sizeof(struct sctp_paramhdr
);
6897 if (len
< sizeof(struct sctp_hmacalgo
) + data_len
)
6900 len
= sizeof(struct sctp_hmacalgo
) + data_len
;
6901 num_idents
= data_len
/ sizeof(u16
);
6903 if (put_user(len
, optlen
))
6905 if (put_user(num_idents
, &p
->shmac_num_idents
))
6907 for (i
= 0; i
< num_idents
; i
++) {
6908 __u16 hmacid
= ntohs(hmacs
->hmac_ids
[i
]);
6910 if (copy_to_user(&p
->shmac_idents
[i
], &hmacid
, sizeof(__u16
)))
6916 static int sctp_getsockopt_active_key(struct sock
*sk
, int len
,
6917 char __user
*optval
, int __user
*optlen
)
6919 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
6920 struct sctp_authkeyid val
;
6921 struct sctp_association
*asoc
;
6923 if (!ep
->auth_enable
)
6926 if (len
< sizeof(struct sctp_authkeyid
))
6929 len
= sizeof(struct sctp_authkeyid
);
6930 if (copy_from_user(&val
, optval
, len
))
6933 asoc
= sctp_id2assoc(sk
, val
.scact_assoc_id
);
6934 if (!asoc
&& val
.scact_assoc_id
&& sctp_style(sk
, UDP
))
6938 val
.scact_keynumber
= asoc
->active_key_id
;
6940 val
.scact_keynumber
= ep
->active_key_id
;
6942 if (put_user(len
, optlen
))
6944 if (copy_to_user(optval
, &val
, len
))
6950 static int sctp_getsockopt_peer_auth_chunks(struct sock
*sk
, int len
,
6951 char __user
*optval
, int __user
*optlen
)
6953 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
6954 struct sctp_authchunks __user
*p
= (void __user
*)optval
;
6955 struct sctp_authchunks val
;
6956 struct sctp_association
*asoc
;
6957 struct sctp_chunks_param
*ch
;
6961 if (!ep
->auth_enable
)
6964 if (len
< sizeof(struct sctp_authchunks
))
6967 if (copy_from_user(&val
, optval
, sizeof(val
)))
6970 to
= p
->gauth_chunks
;
6971 asoc
= sctp_id2assoc(sk
, val
.gauth_assoc_id
);
6975 ch
= asoc
->peer
.peer_chunks
;
6979 /* See if the user provided enough room for all the data */
6980 num_chunks
= ntohs(ch
->param_hdr
.length
) - sizeof(struct sctp_paramhdr
);
6981 if (len
< num_chunks
)
6984 if (copy_to_user(to
, ch
->chunks
, num_chunks
))
6987 len
= sizeof(struct sctp_authchunks
) + num_chunks
;
6988 if (put_user(len
, optlen
))
6990 if (put_user(num_chunks
, &p
->gauth_number_of_chunks
))
6995 static int sctp_getsockopt_local_auth_chunks(struct sock
*sk
, int len
,
6996 char __user
*optval
, int __user
*optlen
)
6998 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
6999 struct sctp_authchunks __user
*p
= (void __user
*)optval
;
7000 struct sctp_authchunks val
;
7001 struct sctp_association
*asoc
;
7002 struct sctp_chunks_param
*ch
;
7006 if (!ep
->auth_enable
)
7009 if (len
< sizeof(struct sctp_authchunks
))
7012 if (copy_from_user(&val
, optval
, sizeof(val
)))
7015 to
= p
->gauth_chunks
;
7016 asoc
= sctp_id2assoc(sk
, val
.gauth_assoc_id
);
7017 if (!asoc
&& val
.gauth_assoc_id
!= SCTP_FUTURE_ASSOC
&&
7018 sctp_style(sk
, UDP
))
7021 ch
= asoc
? (struct sctp_chunks_param
*)asoc
->c
.auth_chunks
7022 : ep
->auth_chunk_list
;
7026 num_chunks
= ntohs(ch
->param_hdr
.length
) - sizeof(struct sctp_paramhdr
);
7027 if (len
< sizeof(struct sctp_authchunks
) + num_chunks
)
7030 if (copy_to_user(to
, ch
->chunks
, num_chunks
))
7033 len
= sizeof(struct sctp_authchunks
) + num_chunks
;
7034 if (put_user(len
, optlen
))
7036 if (put_user(num_chunks
, &p
->gauth_number_of_chunks
))
/*
 * 8.2.5.  Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
 *
 * This option gets the current number of associations that are attached
 * to a one-to-many style socket.  The option value is an uint32_t.
 */
static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	u32 val = 0;

	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		val++;
	}

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
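/* Userspace illustration (not kernel code): this option only makes sense on
 * a one-to-many (SOCK_SEQPACKET) socket; on a TCP-style socket it fails with
 * EOPNOTSUPP, as implemented above.  A sketch, with 'fd' assumed:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	uint32_t assoc_cnt = 0;
 *	socklen_t optlen = sizeof(assoc_cnt);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER,
 *		       &assoc_cnt, &optlen) == 0)
 *		printf("%u associations\n", assoc_cnt);
 */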
/*
 * 8.1.23 SCTP_AUTO_ASCONF
 * See the corresponding setsockopt entry as description
 */
static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
/*
 * 8.2.6. Get the Current Identifiers of Associations
 *        (SCTP_GET_ASSOC_ID_LIST)
 *
 * This option gets the current list of SCTP association identifiers of
 * the SCTP associations handled by a one-to-many style socket.
 */
7103 static int sctp_getsockopt_assoc_ids(struct sock
*sk
, int len
,
7104 char __user
*optval
, int __user
*optlen
)
7106 struct sctp_sock
*sp
= sctp_sk(sk
);
7107 struct sctp_association
*asoc
;
7108 struct sctp_assoc_ids
*ids
;
7111 if (sctp_style(sk
, TCP
))
7114 if (len
< sizeof(struct sctp_assoc_ids
))
7117 list_for_each_entry(asoc
, &(sp
->ep
->asocs
), asocs
) {
7121 if (len
< sizeof(struct sctp_assoc_ids
) + sizeof(sctp_assoc_t
) * num
)
7124 len
= sizeof(struct sctp_assoc_ids
) + sizeof(sctp_assoc_t
) * num
;
7126 ids
= kmalloc(len
, GFP_USER
| __GFP_NOWARN
);
7130 ids
->gaids_number_of_ids
= num
;
7132 list_for_each_entry(asoc
, &(sp
->ep
->asocs
), asocs
) {
7133 ids
->gaids_assoc_id
[num
++] = asoc
->assoc_id
;
7136 if (put_user(len
, optlen
) || copy_to_user(optval
, ids
, len
)) {
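/* Userspace illustration (not kernel code): the caller must size the buffer
 * for the variable-length id array, so a common pattern is to query
 * SCTP_GET_ASSOC_NUMBER first and allocate with some headroom, since the set
 * of associations can change between the two calls.  A sketch, 'fd' assumed:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	uint32_t n = 0;
 *	socklen_t optlen = sizeof(n);
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &n, &optlen);
 *
 *	optlen = sizeof(struct sctp_assoc_ids) + (n + 8) * sizeof(sctp_assoc_t);
 *	struct sctp_assoc_ids *ids = malloc(optlen);
 *
 *	if (ids && getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *			      ids, &optlen) == 0) {
 *		for (uint32_t i = 0; i < ids->gaids_number_of_ids; i++)
 *			printf("assoc id %d\n", ids->gaids_assoc_id[i]);
 *	}
 *	free(ids);
 */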
/*
 * SCTP_PEER_ADDR_THLDS
 *
 * This option allows us to fetch the partially failed threshold for one or all
 * transports in an association.  See Section 6.1 of:
 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
 */
static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
					    char __user *optval,
					    int len,
					    int __user *optlen)
{
	struct sctp_paddrthlds val;
	struct sctp_transport *trans;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_paddrthlds))
		return -EINVAL;
	len = sizeof(struct sctp_paddrthlds);
	if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
		return -EFAULT;

	if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
		trans = sctp_addr_id2transport(sk, &val.spt_address,
					       val.spt_assoc_id);
		if (!trans)
			return -ENOENT;

		val.spt_pathmaxrxt = trans->pathmaxrxt;
		val.spt_pathpfthld = trans->pf_retrans;

		goto out;
	}

	asoc = sctp_id2assoc(sk, val.spt_assoc_id);
	if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		val.spt_pathpfthld = asoc->pf_retrans;
		val.spt_pathmaxrxt = asoc->pathmaxrxt;
	} else {
		struct sctp_sock *sp = sctp_sk(sk);

		val.spt_pathpfthld = sp->pf_retrans;
		val.spt_pathmaxrxt = sp->pathmaxrxt;
	}

out:
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
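/* Userspace illustration (not kernel code): leaving spt_address as a wildcard
 * address returns the association/endpoint-wide thresholds, while filling in
 * a concrete peer address selects a single transport.  A sketch, with 'fd'
 * and 'assoc_id' assumed; an IPv4 wildcard is used as the "any" address.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_paddrthlds th;
 *	socklen_t optlen = sizeof(th);
 *
 *	memset(&th, 0, sizeof(th));
 *	th.spt_assoc_id = assoc_id;
 *	th.spt_address.ss_family = AF_INET;	 (zeroed address == wildcard)
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		       &th, &optlen) == 0)
 *		printf("pf threshold %u, path max rxt %u\n",
 *		       th.spt_pathpfthld, th.spt_pathmaxrxt);
 */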
/*
 * SCTP_GET_ASSOC_STATS
 *
 * This option retrieves local per endpoint statistics.  It is modeled
 * after OpenSolaris' implementation.
 */
7206 static int sctp_getsockopt_assoc_stats(struct sock
*sk
, int len
,
7207 char __user
*optval
,
7210 struct sctp_assoc_stats sas
;
7211 struct sctp_association
*asoc
= NULL
;
7213 /* User must provide at least the assoc id */
7214 if (len
< sizeof(sctp_assoc_t
))
7217 /* Allow the struct to grow and fill in as much as possible */
7218 len
= min_t(size_t, len
, sizeof(sas
));
7220 if (copy_from_user(&sas
, optval
, len
))
7223 asoc
= sctp_id2assoc(sk
, sas
.sas_assoc_id
);
7227 sas
.sas_rtxchunks
= asoc
->stats
.rtxchunks
;
7228 sas
.sas_gapcnt
= asoc
->stats
.gapcnt
;
7229 sas
.sas_outofseqtsns
= asoc
->stats
.outofseqtsns
;
7230 sas
.sas_osacks
= asoc
->stats
.osacks
;
7231 sas
.sas_isacks
= asoc
->stats
.isacks
;
7232 sas
.sas_octrlchunks
= asoc
->stats
.octrlchunks
;
7233 sas
.sas_ictrlchunks
= asoc
->stats
.ictrlchunks
;
7234 sas
.sas_oodchunks
= asoc
->stats
.oodchunks
;
7235 sas
.sas_iodchunks
= asoc
->stats
.iodchunks
;
7236 sas
.sas_ouodchunks
= asoc
->stats
.ouodchunks
;
7237 sas
.sas_iuodchunks
= asoc
->stats
.iuodchunks
;
7238 sas
.sas_idupchunks
= asoc
->stats
.idupchunks
;
7239 sas
.sas_opackets
= asoc
->stats
.opackets
;
7240 sas
.sas_ipackets
= asoc
->stats
.ipackets
;
7242 /* New high max rto observed, will return 0 if not a single
7243 * RTO update took place. obs_rto_ipaddr will be bogus
7246 sas
.sas_maxrto
= asoc
->stats
.max_obs_rto
;
7247 memcpy(&sas
.sas_obs_rto_ipaddr
, &asoc
->stats
.obs_rto_ipaddr
,
7248 sizeof(struct sockaddr_storage
));
7250 /* Mark beginning of a new observation period */
7251 asoc
->stats
.max_obs_rto
= asoc
->rto_min
;
7253 if (put_user(len
, optlen
))
7256 pr_debug("%s: len:%d, assoc_id:%d\n", __func__
, len
, sas
.sas_assoc_id
);
7258 if (copy_to_user(optval
, &sas
, len
))
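/* Userspace illustration (not kernel code): the counters are cumulative for
 * the association, except sas_maxrto and sas_obs_rto_ipaddr, which only cover
 * the window since the previous query (the observation period is reset
 * above).  A sketch, with 'fd' and 'assoc_id' assumed:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_assoc_stats sas;
 *	socklen_t optlen = sizeof(sas);
 *
 *	memset(&sas, 0, sizeof(sas));
 *	sas.sas_assoc_id = assoc_id;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS,
 *		       &sas, &optlen) == 0)
 *		printf("max observed RTO %llu, packets out %llu\n",
 *		       (unsigned long long)sas.sas_maxrto,
 *		       (unsigned long long)sas.sas_opackets);
 */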
static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->recvrcvinfo)
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->recvnxtinfo)
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
7304 static int sctp_getsockopt_pr_supported(struct sock
*sk
, int len
,
7305 char __user
*optval
,
7308 struct sctp_assoc_value params
;
7309 struct sctp_association
*asoc
;
7310 int retval
= -EFAULT
;
7312 if (len
< sizeof(params
)) {
7317 len
= sizeof(params
);
7318 if (copy_from_user(¶ms
, optval
, len
))
7321 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7322 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7323 sctp_style(sk
, UDP
)) {
7328 params
.assoc_value
= asoc
? asoc
->peer
.prsctp_capable
7329 : sctp_sk(sk
)->ep
->prsctp_enable
;
7331 if (put_user(len
, optlen
))
7334 if (copy_to_user(optval
, ¶ms
, len
))
7343 static int sctp_getsockopt_default_prinfo(struct sock
*sk
, int len
,
7344 char __user
*optval
,
7347 struct sctp_default_prinfo info
;
7348 struct sctp_association
*asoc
;
7349 int retval
= -EFAULT
;
7351 if (len
< sizeof(info
)) {
7357 if (copy_from_user(&info
, optval
, len
))
7360 asoc
= sctp_id2assoc(sk
, info
.pr_assoc_id
);
7361 if (!asoc
&& info
.pr_assoc_id
!= SCTP_FUTURE_ASSOC
&&
7362 sctp_style(sk
, UDP
)) {
7368 info
.pr_policy
= SCTP_PR_POLICY(asoc
->default_flags
);
7369 info
.pr_value
= asoc
->default_timetolive
;
7371 struct sctp_sock
*sp
= sctp_sk(sk
);
7373 info
.pr_policy
= SCTP_PR_POLICY(sp
->default_flags
);
7374 info
.pr_value
= sp
->default_timetolive
;
7377 if (put_user(len
, optlen
))
7380 if (copy_to_user(optval
, &info
, len
))
7389 static int sctp_getsockopt_pr_assocstatus(struct sock
*sk
, int len
,
7390 char __user
*optval
,
7393 struct sctp_prstatus params
;
7394 struct sctp_association
*asoc
;
7396 int retval
= -EINVAL
;
7398 if (len
< sizeof(params
))
7401 len
= sizeof(params
);
7402 if (copy_from_user(¶ms
, optval
, len
)) {
7407 policy
= params
.sprstat_policy
;
7408 if (!policy
|| (policy
& ~(SCTP_PR_SCTP_MASK
| SCTP_PR_SCTP_ALL
)) ||
7409 ((policy
& SCTP_PR_SCTP_ALL
) && (policy
& SCTP_PR_SCTP_MASK
)))
7412 asoc
= sctp_id2assoc(sk
, params
.sprstat_assoc_id
);
7416 if (policy
== SCTP_PR_SCTP_ALL
) {
7417 params
.sprstat_abandoned_unsent
= 0;
7418 params
.sprstat_abandoned_sent
= 0;
7419 for (policy
= 0; policy
<= SCTP_PR_INDEX(MAX
); policy
++) {
7420 params
.sprstat_abandoned_unsent
+=
7421 asoc
->abandoned_unsent
[policy
];
7422 params
.sprstat_abandoned_sent
+=
7423 asoc
->abandoned_sent
[policy
];
7426 params
.sprstat_abandoned_unsent
=
7427 asoc
->abandoned_unsent
[__SCTP_PR_INDEX(policy
)];
7428 params
.sprstat_abandoned_sent
=
7429 asoc
->abandoned_sent
[__SCTP_PR_INDEX(policy
)];
7432 if (put_user(len
, optlen
)) {
7437 if (copy_to_user(optval
, ¶ms
, len
)) {
7448 static int sctp_getsockopt_pr_streamstatus(struct sock
*sk
, int len
,
7449 char __user
*optval
,
7452 struct sctp_stream_out_ext
*streamoute
;
7453 struct sctp_association
*asoc
;
7454 struct sctp_prstatus params
;
7455 int retval
= -EINVAL
;
7458 if (len
< sizeof(params
))
7461 len
= sizeof(params
);
7462 if (copy_from_user(¶ms
, optval
, len
)) {
7467 policy
= params
.sprstat_policy
;
7468 if (!policy
|| (policy
& ~(SCTP_PR_SCTP_MASK
| SCTP_PR_SCTP_ALL
)) ||
7469 ((policy
& SCTP_PR_SCTP_ALL
) && (policy
& SCTP_PR_SCTP_MASK
)))
7472 asoc
= sctp_id2assoc(sk
, params
.sprstat_assoc_id
);
7473 if (!asoc
|| params
.sprstat_sid
>= asoc
->stream
.outcnt
)
7476 streamoute
= SCTP_SO(&asoc
->stream
, params
.sprstat_sid
)->ext
;
7478 /* Not allocated yet, means all stats are 0 */
7479 params
.sprstat_abandoned_unsent
= 0;
7480 params
.sprstat_abandoned_sent
= 0;
7485 if (policy
== SCTP_PR_SCTP_ALL
) {
7486 params
.sprstat_abandoned_unsent
= 0;
7487 params
.sprstat_abandoned_sent
= 0;
7488 for (policy
= 0; policy
<= SCTP_PR_INDEX(MAX
); policy
++) {
7489 params
.sprstat_abandoned_unsent
+=
7490 streamoute
->abandoned_unsent
[policy
];
7491 params
.sprstat_abandoned_sent
+=
7492 streamoute
->abandoned_sent
[policy
];
7495 params
.sprstat_abandoned_unsent
=
7496 streamoute
->abandoned_unsent
[__SCTP_PR_INDEX(policy
)];
7497 params
.sprstat_abandoned_sent
=
7498 streamoute
->abandoned_sent
[__SCTP_PR_INDEX(policy
)];
7501 if (put_user(len
, optlen
) || copy_to_user(optval
, ¶ms
, len
)) {
7512 static int sctp_getsockopt_reconfig_supported(struct sock
*sk
, int len
,
7513 char __user
*optval
,
7516 struct sctp_assoc_value params
;
7517 struct sctp_association
*asoc
;
7518 int retval
= -EFAULT
;
7520 if (len
< sizeof(params
)) {
7525 len
= sizeof(params
);
7526 if (copy_from_user(¶ms
, optval
, len
))
7529 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7530 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7531 sctp_style(sk
, UDP
)) {
7536 params
.assoc_value
= asoc
? asoc
->peer
.reconf_capable
7537 : sctp_sk(sk
)->ep
->reconf_enable
;
7539 if (put_user(len
, optlen
))
7542 if (copy_to_user(optval
, ¶ms
, len
))
7551 static int sctp_getsockopt_enable_strreset(struct sock
*sk
, int len
,
7552 char __user
*optval
,
7555 struct sctp_assoc_value params
;
7556 struct sctp_association
*asoc
;
7557 int retval
= -EFAULT
;
7559 if (len
< sizeof(params
)) {
7564 len
= sizeof(params
);
7565 if (copy_from_user(¶ms
, optval
, len
))
7568 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7569 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7570 sctp_style(sk
, UDP
)) {
7575 params
.assoc_value
= asoc
? asoc
->strreset_enable
7576 : sctp_sk(sk
)->ep
->strreset_enable
;
7578 if (put_user(len
, optlen
))
7581 if (copy_to_user(optval
, ¶ms
, len
))
7590 static int sctp_getsockopt_scheduler(struct sock
*sk
, int len
,
7591 char __user
*optval
,
7594 struct sctp_assoc_value params
;
7595 struct sctp_association
*asoc
;
7596 int retval
= -EFAULT
;
7598 if (len
< sizeof(params
)) {
7603 len
= sizeof(params
);
7604 if (copy_from_user(¶ms
, optval
, len
))
7607 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7608 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7609 sctp_style(sk
, UDP
)) {
7614 params
.assoc_value
= asoc
? sctp_sched_get_sched(asoc
)
7615 : sctp_sk(sk
)->default_ss
;
7617 if (put_user(len
, optlen
))
7620 if (copy_to_user(optval
, ¶ms
, len
))
7629 static int sctp_getsockopt_scheduler_value(struct sock
*sk
, int len
,
7630 char __user
*optval
,
7633 struct sctp_stream_value params
;
7634 struct sctp_association
*asoc
;
7635 int retval
= -EFAULT
;
7637 if (len
< sizeof(params
)) {
7642 len
= sizeof(params
);
7643 if (copy_from_user(¶ms
, optval
, len
))
7646 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7652 retval
= sctp_sched_get_value(asoc
, params
.stream_id
,
7653 ¶ms
.stream_value
);
7657 if (put_user(len
, optlen
)) {
7662 if (copy_to_user(optval
, ¶ms
, len
)) {
7671 static int sctp_getsockopt_interleaving_supported(struct sock
*sk
, int len
,
7672 char __user
*optval
,
7675 struct sctp_assoc_value params
;
7676 struct sctp_association
*asoc
;
7677 int retval
= -EFAULT
;
7679 if (len
< sizeof(params
)) {
7684 len
= sizeof(params
);
7685 if (copy_from_user(¶ms
, optval
, len
))
7688 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
7689 if (!asoc
&& params
.assoc_id
!= SCTP_FUTURE_ASSOC
&&
7690 sctp_style(sk
, UDP
)) {
7695 params
.assoc_value
= asoc
? asoc
->peer
.intl_capable
7696 : sctp_sk(sk
)->ep
->intl_enable
;
7698 if (put_user(len
, optlen
))
7701 if (copy_to_user(optval
, ¶ms
, len
))
static int sctp_getsockopt_reuse_port(struct sock *sk, int len,
				      char __user *optval,
				      int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = sctp_sk(sk)->reuse;
	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
static int sctp_getsockopt_event(struct sock *sk, int len, char __user *optval,
				 int __user *optlen)
{
	struct sctp_association *asoc;
	struct sctp_event param;
	__u16 subscribe;

	if (len < sizeof(param))
		return -EINVAL;

	len = sizeof(param);
	if (copy_from_user(&param, optval, len))
		return -EFAULT;

	if (param.se_type < SCTP_SN_TYPE_BASE ||
	    param.se_type > SCTP_SN_TYPE_MAX)
		return -EINVAL;

	asoc = sctp_id2assoc(sk, param.se_assoc_id);
	if (!asoc && param.se_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	subscribe = asoc ? asoc->subscribe : sctp_sk(sk)->subscribe;
	param.se_on = sctp_ulpevent_type_enabled(subscribe, param.se_type);

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &param, len))
		return -EFAULT;

	return 0;
}
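/* Userspace illustration (not kernel code): SCTP_EVENT is the RFC 6458
 * replacement for the older SCTP_EVENTS bitmap and requires headers that
 * define struct sctp_event (an assumption of this sketch).  Enabling
 * association change notifications for future associations on 'fd':
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/sctp.h>
 *
 *	struct sctp_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.se_assoc_id = 0;		 (endpoint-wide / future associations)
 *	ev.se_type = SCTP_ASSOC_CHANGE;
 *	ev.se_on = 1;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
 */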
7765 static int sctp_getsockopt(struct sock
*sk
, int level
, int optname
,
7766 char __user
*optval
, int __user
*optlen
)
7771 pr_debug("%s: sk:%p, optname:%d\n", __func__
, sk
, optname
);
7773 /* I can hardly begin to describe how wrong this is. This is
7774 * so broken as to be worse than useless. The API draft
7775 * REALLY is NOT helpful here... I am not convinced that the
7776 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
7777 * are at all well-founded.
7779 if (level
!= SOL_SCTP
) {
7780 struct sctp_af
*af
= sctp_sk(sk
)->pf
->af
;
7782 retval
= af
->getsockopt(sk
, level
, optname
, optval
, optlen
);
7786 if (get_user(len
, optlen
))
7796 retval
= sctp_getsockopt_sctp_status(sk
, len
, optval
, optlen
);
7798 case SCTP_DISABLE_FRAGMENTS
:
7799 retval
= sctp_getsockopt_disable_fragments(sk
, len
, optval
,
7803 retval
= sctp_getsockopt_events(sk
, len
, optval
, optlen
);
7805 case SCTP_AUTOCLOSE
:
7806 retval
= sctp_getsockopt_autoclose(sk
, len
, optval
, optlen
);
7808 case SCTP_SOCKOPT_PEELOFF
:
7809 retval
= sctp_getsockopt_peeloff(sk
, len
, optval
, optlen
);
7811 case SCTP_SOCKOPT_PEELOFF_FLAGS
:
7812 retval
= sctp_getsockopt_peeloff_flags(sk
, len
, optval
, optlen
);
7814 case SCTP_PEER_ADDR_PARAMS
:
7815 retval
= sctp_getsockopt_peer_addr_params(sk
, len
, optval
,
7818 case SCTP_DELAYED_SACK
:
7819 retval
= sctp_getsockopt_delayed_ack(sk
, len
, optval
,
7823 retval
= sctp_getsockopt_initmsg(sk
, len
, optval
, optlen
);
7825 case SCTP_GET_PEER_ADDRS
:
7826 retval
= sctp_getsockopt_peer_addrs(sk
, len
, optval
,
7829 case SCTP_GET_LOCAL_ADDRS
:
7830 retval
= sctp_getsockopt_local_addrs(sk
, len
, optval
,
7833 case SCTP_SOCKOPT_CONNECTX3
:
7834 retval
= sctp_getsockopt_connectx3(sk
, len
, optval
, optlen
);
7836 case SCTP_DEFAULT_SEND_PARAM
:
7837 retval
= sctp_getsockopt_default_send_param(sk
, len
,
7840 case SCTP_DEFAULT_SNDINFO
:
7841 retval
= sctp_getsockopt_default_sndinfo(sk
, len
,
7844 case SCTP_PRIMARY_ADDR
:
7845 retval
= sctp_getsockopt_primary_addr(sk
, len
, optval
, optlen
);
7848 retval
= sctp_getsockopt_nodelay(sk
, len
, optval
, optlen
);
7851 retval
= sctp_getsockopt_rtoinfo(sk
, len
, optval
, optlen
);
7853 case SCTP_ASSOCINFO
:
7854 retval
= sctp_getsockopt_associnfo(sk
, len
, optval
, optlen
);
7856 case SCTP_I_WANT_MAPPED_V4_ADDR
:
7857 retval
= sctp_getsockopt_mappedv4(sk
, len
, optval
, optlen
);
7860 retval
= sctp_getsockopt_maxseg(sk
, len
, optval
, optlen
);
7862 case SCTP_GET_PEER_ADDR_INFO
:
7863 retval
= sctp_getsockopt_peer_addr_info(sk
, len
, optval
,
7866 case SCTP_ADAPTATION_LAYER
:
7867 retval
= sctp_getsockopt_adaptation_layer(sk
, len
, optval
,
7871 retval
= sctp_getsockopt_context(sk
, len
, optval
, optlen
);
7873 case SCTP_FRAGMENT_INTERLEAVE
:
7874 retval
= sctp_getsockopt_fragment_interleave(sk
, len
, optval
,
7877 case SCTP_PARTIAL_DELIVERY_POINT
:
7878 retval
= sctp_getsockopt_partial_delivery_point(sk
, len
, optval
,
7881 case SCTP_MAX_BURST
:
7882 retval
= sctp_getsockopt_maxburst(sk
, len
, optval
, optlen
);
7885 case SCTP_AUTH_CHUNK
:
7886 case SCTP_AUTH_DELETE_KEY
:
7887 case SCTP_AUTH_DEACTIVATE_KEY
:
7888 retval
= -EOPNOTSUPP
;
7890 case SCTP_HMAC_IDENT
:
7891 retval
= sctp_getsockopt_hmac_ident(sk
, len
, optval
, optlen
);
7893 case SCTP_AUTH_ACTIVE_KEY
:
7894 retval
= sctp_getsockopt_active_key(sk
, len
, optval
, optlen
);
7896 case SCTP_PEER_AUTH_CHUNKS
:
7897 retval
= sctp_getsockopt_peer_auth_chunks(sk
, len
, optval
,
7900 case SCTP_LOCAL_AUTH_CHUNKS
:
7901 retval
= sctp_getsockopt_local_auth_chunks(sk
, len
, optval
,
7904 case SCTP_GET_ASSOC_NUMBER
:
7905 retval
= sctp_getsockopt_assoc_number(sk
, len
, optval
, optlen
);
7907 case SCTP_GET_ASSOC_ID_LIST
:
7908 retval
= sctp_getsockopt_assoc_ids(sk
, len
, optval
, optlen
);
7910 case SCTP_AUTO_ASCONF
:
7911 retval
= sctp_getsockopt_auto_asconf(sk
, len
, optval
, optlen
);
7913 case SCTP_PEER_ADDR_THLDS
:
7914 retval
= sctp_getsockopt_paddr_thresholds(sk
, optval
, len
, optlen
);
7916 case SCTP_GET_ASSOC_STATS
:
7917 retval
= sctp_getsockopt_assoc_stats(sk
, len
, optval
, optlen
);
7919 case SCTP_RECVRCVINFO
:
7920 retval
= sctp_getsockopt_recvrcvinfo(sk
, len
, optval
, optlen
);
7922 case SCTP_RECVNXTINFO
:
7923 retval
= sctp_getsockopt_recvnxtinfo(sk
, len
, optval
, optlen
);
7925 case SCTP_PR_SUPPORTED
:
7926 retval
= sctp_getsockopt_pr_supported(sk
, len
, optval
, optlen
);
7928 case SCTP_DEFAULT_PRINFO
:
7929 retval
= sctp_getsockopt_default_prinfo(sk
, len
, optval
,
7932 case SCTP_PR_ASSOC_STATUS
:
7933 retval
= sctp_getsockopt_pr_assocstatus(sk
, len
, optval
,
7936 case SCTP_PR_STREAM_STATUS
:
7937 retval
= sctp_getsockopt_pr_streamstatus(sk
, len
, optval
,
7940 case SCTP_RECONFIG_SUPPORTED
:
7941 retval
= sctp_getsockopt_reconfig_supported(sk
, len
, optval
,
7944 case SCTP_ENABLE_STREAM_RESET
:
7945 retval
= sctp_getsockopt_enable_strreset(sk
, len
, optval
,
7948 case SCTP_STREAM_SCHEDULER
:
7949 retval
= sctp_getsockopt_scheduler(sk
, len
, optval
,
7952 case SCTP_STREAM_SCHEDULER_VALUE
:
7953 retval
= sctp_getsockopt_scheduler_value(sk
, len
, optval
,
7956 case SCTP_INTERLEAVING_SUPPORTED
:
7957 retval
= sctp_getsockopt_interleaving_supported(sk
, len
, optval
,
7960 case SCTP_REUSE_PORT
:
7961 retval
= sctp_getsockopt_reuse_port(sk
, len
, optval
, optlen
);
7964 retval
= sctp_getsockopt_event(sk
, len
, optval
, optlen
);
7967 retval
= -ENOPROTOOPT
;
static int sctp_hash(struct sock *sk)
{
	/* STUB */
	return 0;
}

static void sctp_unhash(struct sock *sk)
{
	/* STUB */
}
/* Check if port is acceptable.  Possibly find first available port.
 *
 * The port hash table is contained in the 'global' SCTP protocol storage
 * returned by struct sctp_protocol *sctp_get_protocol().  The hash
 * table is an array of 4096 lists (sctp_bind_hashbucket).  Each list is
 * keyed by port number (as you would expect from a hash function, every
 * port in a given list hashes out to the same list number), so each list
 * holds a set of ports, each with a link to the socket (struct sock)
 * that uses it, the port number and a fastreuse flag (FIXME: NPI ipg).
 */
static struct sctp_bind_bucket *sctp_bucket_create(
	struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
8001 static long sctp_get_port_local(struct sock
*sk
, union sctp_addr
*addr
)
8003 struct sctp_sock
*sp
= sctp_sk(sk
);
8004 bool reuse
= (sk
->sk_reuse
|| sp
->reuse
);
8005 struct sctp_bind_hashbucket
*head
; /* hash list */
8006 kuid_t uid
= sock_i_uid(sk
);
8007 struct sctp_bind_bucket
*pp
;
8008 unsigned short snum
;
8011 snum
= ntohs(addr
->v4
.sin_port
);
8013 pr_debug("%s: begins, snum:%d\n", __func__
, snum
);
8018 /* Search for an available port. */
8019 int low
, high
, remaining
, index
;
8021 struct net
*net
= sock_net(sk
);
8023 inet_get_local_port_range(net
, &low
, &high
);
8024 remaining
= (high
- low
) + 1;
8025 rover
= prandom_u32() % remaining
+ low
;
8029 if ((rover
< low
) || (rover
> high
))
8031 if (inet_is_local_reserved_port(net
, rover
))
8033 index
= sctp_phashfn(sock_net(sk
), rover
);
8034 head
= &sctp_port_hashtable
[index
];
8035 spin_lock(&head
->lock
);
8036 sctp_for_each_hentry(pp
, &head
->chain
)
8037 if ((pp
->port
== rover
) &&
8038 net_eq(sock_net(sk
), pp
->net
))
8042 spin_unlock(&head
->lock
);
8043 } while (--remaining
> 0);
8045 /* Exhausted local port range during search? */
		/* OK, here is the one we will use.  HEAD (the port
		 * hash table list entry) is non-NULL and we hold its
		 * lock.
		 */
		snum = rover;
	} else {
		/* We are given a specific port number; we verify
		 * that it is not being used.  If it is used, we will
		 * exhaust the search in the hash list corresponding
		 * to the port number (snum) - we detect that with the
		 * port iterator, pp being NULL.
		 */
8062 head
= &sctp_port_hashtable
[sctp_phashfn(sock_net(sk
), snum
)];
8063 spin_lock(&head
->lock
);
8064 sctp_for_each_hentry(pp
, &head
->chain
) {
8065 if ((pp
->port
== snum
) && net_eq(pp
->net
, sock_net(sk
)))
8072 if (!hlist_empty(&pp
->owner
)) {
8073 /* We had a port hash table hit - there is an
8074 * available port (pp != NULL) and it is being
8075 * used by other socket (pp->owner not empty); that other
8076 * socket is going to be sk2.
8080 pr_debug("%s: found a possible match\n", __func__
);
8082 if ((pp
->fastreuse
&& reuse
&&
8083 sk
->sk_state
!= SCTP_SS_LISTENING
) ||
8084 (pp
->fastreuseport
&& sk
->sk_reuseport
&&
8085 uid_eq(pp
->fastuid
, uid
)))
8088 /* Run through the list of sockets bound to the port
8089 * (pp->port) [via the pointers bind_next and
8090 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
8091 * we get the endpoint they describe and run through
8092 * the endpoint's list of IP (v4 or v6) addresses,
8093 * comparing each of the addresses with the address of
8094 * the socket sk. If we find a match, then that means
8095 * that this port/socket (sk) combination are already
8098 sk_for_each_bound(sk2
, &pp
->owner
) {
8099 struct sctp_sock
*sp2
= sctp_sk(sk2
);
8100 struct sctp_endpoint
*ep2
= sp2
->ep
;
8103 (reuse
&& (sk2
->sk_reuse
|| sp2
->reuse
) &&
8104 sk2
->sk_state
!= SCTP_SS_LISTENING
) ||
8105 (sk
->sk_reuseport
&& sk2
->sk_reuseport
&&
8106 uid_eq(uid
, sock_i_uid(sk2
))))
8109 if (sctp_bind_addr_conflict(&ep2
->base
.bind_addr
,
8116 pr_debug("%s: found a match\n", __func__
);
8119 /* If there was a hash table miss, create a new port. */
8121 if (!pp
&& !(pp
= sctp_bucket_create(head
, sock_net(sk
), snum
)))
8124 /* In either case (hit or miss), make sure fastreuse is 1 only
8125 * if sk->sk_reuse is too (that is, if the caller requested
8126 * SO_REUSEADDR on this socket -sk-).
8128 if (hlist_empty(&pp
->owner
)) {
8129 if (reuse
&& sk
->sk_state
!= SCTP_SS_LISTENING
)
8134 if (sk
->sk_reuseport
) {
8135 pp
->fastreuseport
= 1;
8138 pp
->fastreuseport
= 0;
8141 if (pp
->fastreuse
&&
8142 (!reuse
|| sk
->sk_state
== SCTP_SS_LISTENING
))
8145 if (pp
->fastreuseport
&&
8146 (!sk
->sk_reuseport
|| !uid_eq(pp
->fastuid
, uid
)))
8147 pp
->fastreuseport
= 0;
8150 /* We are set, so fill up all the data in the hash table
8151 * entry, tie the socket list information with the rest of the
8152 * sockets FIXME: Blurry, NPI (ipg).
8155 if (!sp
->bind_hash
) {
8156 inet_sk(sk
)->inet_num
= snum
;
8157 sk_add_bind_node(sk
, &pp
->owner
);
8163 spin_unlock(&head
->lock
);
/* Assign a 'snum' port to the socket.  If snum == 0, an ephemeral
 * port is requested.
 */
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
	union sctp_addr addr;
	struct sctp_af *af = sctp_sk(sk)->pf->af;

	/* Set up a dummy address struct from the sk. */
	af->from_sk(&addr, sk);
	addr.v4.sin_port = htons(snum);

	/* Note: sk->sk_num gets filled in if ephemeral port request. */
	return !!sctp_get_port_local(sk, &addr);
}
8187 * Move a socket to LISTENING state.
8189 static int sctp_listen_start(struct sock
*sk
, int backlog
)
8191 struct sctp_sock
*sp
= sctp_sk(sk
);
8192 struct sctp_endpoint
*ep
= sp
->ep
;
8193 struct crypto_shash
*tfm
= NULL
;
8196 /* Allocate HMAC for generating cookie. */
8197 if (!sp
->hmac
&& sp
->sctp_hmac_alg
) {
8198 sprintf(alg
, "hmac(%s)", sp
->sctp_hmac_alg
);
8199 tfm
= crypto_alloc_shash(alg
, 0, 0);
8201 net_info_ratelimited("failed to load transform for %s: %ld\n",
8202 sp
->sctp_hmac_alg
, PTR_ERR(tfm
));
8205 sctp_sk(sk
)->hmac
= tfm
;
8209 * If a bind() or sctp_bindx() is not called prior to a listen()
8210 * call that allows new associations to be accepted, the system
8211 * picks an ephemeral port and will choose an address set equivalent
8212 * to binding with a wildcard address.
8214 * This is not currently spelled out in the SCTP sockets
8215 * extensions draft, but follows the practice as seen in TCP
8219 inet_sk_set_state(sk
, SCTP_SS_LISTENING
);
8220 if (!ep
->base
.bind_addr
.port
) {
8221 if (sctp_autobind(sk
))
8224 if (sctp_get_port(sk
, inet_sk(sk
)->inet_num
)) {
8225 inet_sk_set_state(sk
, SCTP_SS_CLOSED
);
8230 sk
->sk_max_ack_backlog
= backlog
;
8231 return sctp_hash_endpoint(ep
);
8235 * 4.1.3 / 5.1.3 listen()
8237 * By default, new associations are not accepted for UDP style sockets.
8238 * An application uses listen() to mark a socket as being able to
8239 * accept new associations.
8241 * On TCP style sockets, applications use listen() to ready the SCTP
8242 * endpoint for accepting inbound associations.
8244 * On both types of endpoints a backlog of '0' disables listening.
8246 * Move a socket to LISTENING state.
8248 int sctp_inet_listen(struct socket
*sock
, int backlog
)
8250 struct sock
*sk
= sock
->sk
;
8251 struct sctp_endpoint
*ep
= sctp_sk(sk
)->ep
;
8254 if (unlikely(backlog
< 0))
8259 /* Peeled-off sockets are not allowed to listen(). */
8260 if (sctp_style(sk
, UDP_HIGH_BANDWIDTH
))
8263 if (sock
->state
!= SS_UNCONNECTED
)
8266 if (!sctp_sstate(sk
, LISTENING
) && !sctp_sstate(sk
, CLOSED
))
8269 /* If backlog is zero, disable listening. */
8271 if (sctp_sstate(sk
, CLOSED
))
8275 sctp_unhash_endpoint(ep
);
8276 sk
->sk_state
= SCTP_SS_CLOSED
;
8277 if (sk
->sk_reuse
|| sctp_sk(sk
)->reuse
)
8278 sctp_sk(sk
)->bind_hash
->fastreuse
= 1;
8282 /* If we are already listening, just update the backlog */
8283 if (sctp_sstate(sk
, LISTENING
))
8284 sk
->sk_max_ack_backlog
= backlog
;
8286 err
= sctp_listen_start(sk
, backlog
);
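/* Userspace illustration (not kernel code): the usual one-to-many server
 * setup that ends up in sctp_inet_listen() above.  A sketch; the port and
 * backlog are arbitrary example values and error checking is omitted.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sin_family = AF_INET;
 *	addr.sin_port = htons(5000);
 *	addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 8);		 (a backlog > 0 enables new associations)
 */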
8298 * This function is done by modeling the current datagram_poll() and the
8299 * tcp_poll(). Note that, based on these implementations, we don't
8300 * lock the socket in this function, even though it seems that,
8301 * ideally, locking or some other mechanisms can be used to ensure
8302 * the integrity of the counters (sndbuf and wmem_alloc) used
8303 * in this place. We assume that we don't need locks either until proven
8306 * Another thing to note is that we include the Async I/O support
8307 * here, again, by modeling the current TCP/UDP code. We don't have
8308 * a good way to test with it yet.
8310 __poll_t
sctp_poll(struct file
*file
, struct socket
*sock
, poll_table
*wait
)
8312 struct sock
*sk
= sock
->sk
;
8313 struct sctp_sock
*sp
= sctp_sk(sk
);
8316 poll_wait(file
, sk_sleep(sk
), wait
);
8318 sock_rps_record_flow(sk
);
8320 /* A TCP-style listening socket becomes readable when the accept queue
8323 if (sctp_style(sk
, TCP
) && sctp_sstate(sk
, LISTENING
))
8324 return (!list_empty(&sp
->ep
->asocs
)) ?
8325 (EPOLLIN
| EPOLLRDNORM
) : 0;
8329 /* Is there any exceptional events? */
8330 if (sk
->sk_err
|| !skb_queue_empty(&sk
->sk_error_queue
))
8332 (sock_flag(sk
, SOCK_SELECT_ERR_QUEUE
) ? EPOLLPRI
: 0);
8333 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
8334 mask
|= EPOLLRDHUP
| EPOLLIN
| EPOLLRDNORM
;
8335 if (sk
->sk_shutdown
== SHUTDOWN_MASK
)
8338 /* Is it readable? Reconsider this code with TCP-style support. */
8339 if (!skb_queue_empty(&sk
->sk_receive_queue
))
8340 mask
|= EPOLLIN
| EPOLLRDNORM
;
8342 /* The association is either gone or not ready. */
8343 if (!sctp_style(sk
, UDP
) && sctp_sstate(sk
, CLOSED
))
8346 /* Is it writable? */
8347 if (sctp_writeable(sk
)) {
8348 mask
|= EPOLLOUT
| EPOLLWRNORM
;
8350 sk_set_bit(SOCKWQ_ASYNC_NOSPACE
, sk
);
8352 * Since the socket is not locked, the buffer
8353 * might be made available after the writeable check and
8354 * before the bit is set. This could cause a lost I/O
8355 * signal. tcp_poll() has a race breaker for this race
8356 * condition. Based on their implementation, we put
8357 * in the following code to cover it as well.
8359 if (sctp_writeable(sk
))
8360 mask
|= EPOLLOUT
| EPOLLWRNORM
;
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

static struct sctp_bind_bucket *sctp_bucket_create(
	struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
{
	struct sctp_bind_bucket *pp;

	pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
	if (pp) {
		SCTP_DBG_OBJCNT_INC(bind_bucket);
		pp->port = snum;
		pp->fastreuse = 0;
		INIT_HLIST_HEAD(&pp->owner);
		pp->net = net;
		hlist_add_head(&pp->node, &head->chain);
	}
	return pp;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
	if (pp && hlist_empty(&pp->owner)) {
		__hlist_del(&pp->node);
		kmem_cache_free(sctp_bucket_cachep, pp);
		SCTP_DBG_OBJCNT_DEC(bind_bucket);
	}
}
/* Release this socket's reference to a local port.  */
static inline void __sctp_put_port(struct sock *sk)
{
	struct sctp_bind_hashbucket *head =
		&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
						  inet_sk(sk)->inet_num)];
	struct sctp_bind_bucket *pp;

	spin_lock(&head->lock);
	pp = sctp_sk(sk)->bind_hash;
	__sk_del_bind_node(sk);
	sctp_sk(sk)->bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	sctp_bucket_destroy(pp);
	spin_unlock(&head->lock);
}

void sctp_put_port(struct sock *sk)
{
	local_bh_disable();
	__sctp_put_port(sk);
	local_bh_enable();
}
/*
 * The system picks an ephemeral port and chooses an address set equivalent
 * to binding with a wildcard address.
 * One of those addresses will be the primary address for the association.
 * This automatically enables the multihoming capability of SCTP.
 */
static int sctp_autobind(struct sock *sk)
{
	union sctp_addr autoaddr;
	struct sctp_af *af;
	__be16 port;

	/* Initialize a local sockaddr structure to INADDR_ANY. */
	af = sctp_sk(sk)->pf->af;

	port = htons(inet_sk(sk)->inet_num);
	af->inaddr_any(&autoaddr, port);

	return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
}
8441 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
8444 * 4.2 The cmsghdr Structure *
8446 * When ancillary data is sent or received, any number of ancillary data
8447 * objects can be specified by the msg_control and msg_controllen members of
8448 * the msghdr structure, because each object is preceded by
8449 * a cmsghdr structure defining the object's length (the cmsg_len member).
8450 * Historically Berkeley-derived implementations have passed only one object
8451 * at a time, but this API allows multiple objects to be
8452 * passed in a single call to sendmsg() or recvmsg(). The following example
8453 * shows two ancillary data objects in a control buffer.
8455 * |<--------------------------- msg_controllen -------------------------->|
8458 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
8460 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
8463 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| |
8465 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| |
8468 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
8469 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX|
8471 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX|
8473 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
8480 static int sctp_msghdr_parse(const struct msghdr
*msg
, struct sctp_cmsgs
*cmsgs
)
8482 struct msghdr
*my_msg
= (struct msghdr
*)msg
;
8483 struct cmsghdr
*cmsg
;
8485 for_each_cmsghdr(cmsg
, my_msg
) {
8486 if (!CMSG_OK(my_msg
, cmsg
))
8489 /* Should we parse this header or ignore? */
8490 if (cmsg
->cmsg_level
!= IPPROTO_SCTP
)
8493 /* Strictly check lengths following example in SCM code. */
8494 switch (cmsg
->cmsg_type
) {
8496 /* SCTP Socket API Extension
8497 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
8499 * This cmsghdr structure provides information for
8500 * initializing new SCTP associations with sendmsg().
8501 * The SCTP_INITMSG socket option uses this same data
8502 * structure. This structure is not used for
8505 * cmsg_level cmsg_type cmsg_data[]
8506 * ------------ ------------ ----------------------
8507 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
8509 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_initmsg
)))
8512 cmsgs
->init
= CMSG_DATA(cmsg
);
8516 /* SCTP Socket API Extension
8517 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
8519 * This cmsghdr structure specifies SCTP options for
8520 * sendmsg() and describes SCTP header information
8521 * about a received message through recvmsg().
8523 * cmsg_level cmsg_type cmsg_data[]
8524 * ------------ ------------ ----------------------
8525 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
8527 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_sndrcvinfo
)))
8530 cmsgs
->srinfo
= CMSG_DATA(cmsg
);
8532 if (cmsgs
->srinfo
->sinfo_flags
&
8533 ~(SCTP_UNORDERED
| SCTP_ADDR_OVER
|
8534 SCTP_SACK_IMMEDIATELY
| SCTP_SENDALL
|
8535 SCTP_PR_SCTP_MASK
| SCTP_ABORT
| SCTP_EOF
))
8540 /* SCTP Socket API Extension
8541 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
8543 * This cmsghdr structure specifies SCTP options for
8544 * sendmsg(). This structure and SCTP_RCVINFO replaces
8545 * SCTP_SNDRCV which has been deprecated.
8547 * cmsg_level cmsg_type cmsg_data[]
8548 * ------------ ------------ ---------------------
8549 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
8551 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_sndinfo
)))
8554 cmsgs
->sinfo
= CMSG_DATA(cmsg
);
8556 if (cmsgs
->sinfo
->snd_flags
&
8557 ~(SCTP_UNORDERED
| SCTP_ADDR_OVER
|
8558 SCTP_SACK_IMMEDIATELY
| SCTP_SENDALL
|
8559 SCTP_PR_SCTP_MASK
| SCTP_ABORT
| SCTP_EOF
))
8563 /* SCTP Socket API Extension
8564 * 5.3.7 SCTP PR-SCTP Information Structure (SCTP_PRINFO)
8566 * This cmsghdr structure specifies SCTP options for sendmsg().
8568 * cmsg_level cmsg_type cmsg_data[]
8569 * ------------ ------------ ---------------------
8570 * IPPROTO_SCTP SCTP_PRINFO struct sctp_prinfo
8572 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_prinfo
)))
8575 cmsgs
->prinfo
= CMSG_DATA(cmsg
);
8576 if (cmsgs
->prinfo
->pr_policy
& ~SCTP_PR_SCTP_MASK
)
8579 if (cmsgs
->prinfo
->pr_policy
== SCTP_PR_SCTP_NONE
)
8580 cmsgs
->prinfo
->pr_value
= 0;
8583 /* SCTP Socket API Extension
8584 * 5.3.8 SCTP AUTH Information Structure (SCTP_AUTHINFO)
8586 * This cmsghdr structure specifies SCTP options for sendmsg().
8588 * cmsg_level cmsg_type cmsg_data[]
8589 * ------------ ------------ ---------------------
8590 * IPPROTO_SCTP SCTP_AUTHINFO struct sctp_authinfo
8592 if (cmsg
->cmsg_len
!= CMSG_LEN(sizeof(struct sctp_authinfo
)))
8595 cmsgs
->authinfo
= CMSG_DATA(cmsg
);
8597 case SCTP_DSTADDRV4
:
8598 case SCTP_DSTADDRV6
:
8599 /* SCTP Socket API Extension
8600 * 5.3.9/10 SCTP Destination IPv4/6 Address Structure (SCTP_DSTADDRV4/6)
8602 * This cmsghdr structure specifies SCTP options for sendmsg().
8604 * cmsg_level cmsg_type cmsg_data[]
8605 * ------------ ------------ ---------------------
8606 * IPPROTO_SCTP SCTP_DSTADDRV4 struct in_addr
8607 * ------------ ------------ ---------------------
8608 * IPPROTO_SCTP SCTP_DSTADDRV6 struct in6_addr
8610 cmsgs
->addrs_msg
= my_msg
;
8621 * Wait for a packet..
8622 * Note: This function is the same function as in core/datagram.c
8623 * with a few modifications to make lksctp work.
8625 static int sctp_wait_for_packet(struct sock
*sk
, int *err
, long *timeo_p
)
8630 prepare_to_wait_exclusive(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
8632 /* Socket errors? */
8633 error
= sock_error(sk
);
8637 if (!skb_queue_empty(&sk
->sk_receive_queue
))
8640 /* Socket shut down? */
8641 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
8644 /* Sequenced packets can come disconnected. If so we report the
8649 /* Is there a good reason to think that we may receive some data? */
8650 if (list_empty(&sctp_sk(sk
)->ep
->asocs
) && !sctp_sstate(sk
, LISTENING
))
8653 /* Handle signals. */
8654 if (signal_pending(current
))
8657 /* Let another process have a go. Since we are going to sleep
8658 * anyway. Note: This may cause odd behaviors if the message
8659 * does not fit in the user's buffer, but this seems to be the
8660 * only way to honor MSG_DONTWAIT realistically.
8663 *timeo_p
= schedule_timeout(*timeo_p
);
8667 finish_wait(sk_sleep(sk
), &wait
);
8671 error
= sock_intr_errno(*timeo_p
);
8674 finish_wait(sk_sleep(sk
), &wait
);
8679 /* Receive a datagram.
8680 * Note: This is pretty much the same routine as in core/datagram.c
8681 * with a few changes to make lksctp work.
8683 struct sk_buff
*sctp_skb_recv_datagram(struct sock
*sk
, int flags
,
8684 int noblock
, int *err
)
8687 struct sk_buff
*skb
;
8690 timeo
= sock_rcvtimeo(sk
, noblock
);
8692 pr_debug("%s: timeo:%ld, max:%ld\n", __func__
, timeo
,
8693 MAX_SCHEDULE_TIMEOUT
);
8696 /* Again only user level code calls this function,
8697 * so nothing interrupt level
8698 * will suddenly eat the receive_queue.
8700 * Look at current nfs client by the way...
8701 * However, this function was correct in any case. 8)
8703 if (flags
& MSG_PEEK
) {
8704 skb
= skb_peek(&sk
->sk_receive_queue
);
8706 refcount_inc(&skb
->users
);
8708 skb
= __skb_dequeue(&sk
->sk_receive_queue
);
8714 /* Caller is allowed not to check sk->sk_err before calling. */
8715 error
= sock_error(sk
);
8719 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
8722 if (sk_can_busy_loop(sk
)) {
8723 sk_busy_loop(sk
, noblock
);
8725 if (!skb_queue_empty(&sk
->sk_receive_queue
))
8729 /* User doesn't want to wait. */
8733 } while (sctp_wait_for_packet(sk
, err
, &timeo
) == 0);
/* If sndbuf has changed, wake up per association sndbuf waiters.  */
static void __sctp_write_space(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	if (sctp_wspace(asoc) <= 0)
		return;

	if (waitqueue_active(&asoc->wait))
		wake_up_interruptible(&asoc->wait);

	if (sctp_writeable(sk)) {
		struct socket_wq *wq;

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (wq) {
			if (waitqueue_active(&wq->wait))
				wake_up_interruptible(&wq->wait);

			/* Note that we try to include the Async I/O support
			 * here by modeling from the current TCP/UDP code.
			 * We have not tested with it yet.
			 */
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		}
		rcu_read_unlock();
	}
}
8773 static void sctp_wake_up_waiters(struct sock
*sk
,
8774 struct sctp_association
*asoc
)
8776 struct sctp_association
*tmp
= asoc
;
8778 /* We do accounting for the sndbuf space per association,
8779 * so we only need to wake our own association.
8781 if (asoc
->ep
->sndbuf_policy
)
8782 return __sctp_write_space(asoc
);
8784 /* If association goes down and is just flushing its
8785 * outq, then just normally notify others.
8787 if (asoc
->base
.dead
)
8788 return sctp_write_space(sk
);
8790 /* Accounting for the sndbuf space is per socket, so we
8791 * need to wake up others, try to be fair and in case of
8792 * other associations, let them have a go first instead
8793 * of just doing a sctp_write_space() call.
8795 * Note that we reach sctp_wake_up_waiters() only when
8796 * associations free up queued chunks, thus we are under
8797 * lock and the list of associations on a socket is
8798 * guaranteed not to change.
8800 for (tmp
= list_next_entry(tmp
, asocs
); 1;
8801 tmp
= list_next_entry(tmp
, asocs
)) {
8802 /* Manually skip the head element. */
8803 if (&tmp
->asocs
== &((sctp_sk(sk
))->ep
->asocs
))
8805 /* Wake up association. */
8806 __sctp_write_space(tmp
);
8807 /* We've reached the end. */
8813 /* Do accounting for the sndbuf space.
8814 * Decrement the used sndbuf space of the corresponding association by the
8815 * data size which was just transmitted(freed).
8817 static void sctp_wfree(struct sk_buff
*skb
)
8819 struct sctp_chunk
*chunk
= skb_shinfo(skb
)->destructor_arg
;
8820 struct sctp_association
*asoc
= chunk
->asoc
;
8821 struct sock
*sk
= asoc
->base
.sk
;
8823 sk_mem_uncharge(sk
, skb
->truesize
);
8824 sk
->sk_wmem_queued
-= skb
->truesize
+ sizeof(struct sctp_chunk
);
8825 asoc
->sndbuf_used
-= skb
->truesize
+ sizeof(struct sctp_chunk
);
8826 WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk
),
8827 &sk
->sk_wmem_alloc
));
8830 struct sctp_shared_key
*shkey
= chunk
->shkey
;
8832 /* refcnt == 2 and !list_empty mean after this release, it's
8833 * not being used anywhere, and it's time to notify userland
8834 * that this shkey can be freed if it's been deactivated.
8836 if (shkey
->deactivated
&& !list_empty(&shkey
->key_list
) &&
8837 refcount_read(&shkey
->refcnt
) == 2) {
8838 struct sctp_ulpevent
*ev
;
8840 ev
= sctp_ulpevent_make_authkey(asoc
, shkey
->key_id
,
8844 asoc
->stream
.si
->enqueue_event(&asoc
->ulpq
, ev
);
8846 sctp_auth_shkey_release(chunk
->shkey
);
8850 sctp_wake_up_waiters(sk
, asoc
);
8852 sctp_association_put(asoc
);
/* Do accounting for the receive space on the socket.
 * Accounting for the association is done in ulpevent.c
 * We set this as a destructor for the cloned data skbs so that
 * accounting is done at the correct time.
 */
void sctp_sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct sctp_ulpevent *event = sctp_skb2event(skb);

	atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);

	/*
	 * Mimic the behavior of sock_rfree
	 */
	sk_mem_uncharge(sk, event->rmem_len);
}
/* Helper function to wait for space in the sndbuf.  */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len)
{
	struct sock *sk = asoc->base.sk;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);
	int err = 0;

	pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
		 *timeo_p, msg_len);

	/* Increment the association's refcnt.  */
	sctp_association_hold(asoc);

	/* Wait on the association specific sndbuf space. */
	for (;;) {
		prepare_to_wait_exclusive(&asoc->wait, &wait,
					  TASK_INTERRUPTIBLE);
		if (asoc->base.dead)
			goto do_dead;
		if (!*timeo_p)
			goto do_nonblock;
		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;
		if (sk_under_memory_pressure(sk))
			sk_mem_reclaim(sk);
		if ((int)msg_len <= sctp_wspace(asoc) &&
		    sk_wmem_schedule(sk, msg_len))
			break;

		/* Let another process have a go.  Since we are going
		 * to sleep anyway.
		 */
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);
		if (sk != asoc->base.sk)
			goto do_error;

		*timeo_p = current_timeo;
	}

out:
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt.  */
	sctp_association_put(asoc);

	return err;

do_dead:
	err = -ESRCH;
	goto out;

do_error:
	err = -EPIPE;
	goto out;

do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;

do_nonblock:
	err = -EAGAIN;
	goto out;
}
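/*
 * Illustrative userspace sketch (not compiled as part of this file): the wait
 * above is what a blocking sendmsg() ends up in once the association has no
 * sndbuf space left; a non-blocking sender gets -EAGAIN instead and can retry
 * after the socket becomes writable again.  The helper name is ours and error
 * handling is trimmed for brevity.
 *
 *	#include <errno.h>
 *	#include <poll.h>
 *	#include <sys/socket.h>
 *
 *	static ssize_t send_or_wait(int fd, const void *buf, size_t len)
 *	{
 *		for (;;) {
 *			ssize_t n = send(fd, buf, len, MSG_DONTWAIT);
 *
 *			if (n >= 0 || errno != EAGAIN)
 *				return n;
 *
 *			// Send buffer full: wait until the kernel frees queued
 *			// chunks (sctp_wfree -> sctp_wake_up_waiters) and
 *			// reports the socket writable again.
 *			struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *			poll(&pfd, 1, -1);
 *		}
 *	}
 */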
void sctp_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
/* If socket sndbuf has changed, wake up all per association waiters.  */
void sctp_write_space(struct sock *sk)
{
	struct sctp_association *asoc;

	/* Wake up the tasks in each wait queue.  */
	list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
		__sctp_write_space(asoc);
	}
}
/* Is there any sndbuf space available on the socket?
 *
 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
 * associations on the same socket.  For a UDP-style socket with
 * multiple associations, it is possible for it to be "unwriteable"
 * prematurely.  I assume that this is acceptable because
 * a premature "unwriteable" is better than an accidental "writeable" which
 * would cause an unwanted block under certain circumstances.  For the 1-1
 * UDP-style sockets or TCP-style sockets, this code should work.
 */
static bool sctp_writeable(struct sock *sk)
{
	return sk->sk_sndbuf > sk->sk_wmem_queued;
}
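/*
 * Illustrative userspace sketch (not compiled as part of this file): the
 * writability test above compares sk_sndbuf with sk_wmem_queued, so the limit
 * an application actually experiences is whatever SO_SNDBUF was set to (the
 * kernel doubles the requested value to leave room for bookkeeping overhead).
 * The helper name is ours; a minimal way to inspect and raise that limit:
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	static void bump_sndbuf(int fd)
 *	{
 *		int val = 0;
 *		socklen_t len = sizeof(val);
 *
 *		// Report the limit the kernel currently enforces.
 *		getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len);
 *		printf("sk_sndbuf is %d bytes\n", val);
 *
 *		// Request a larger send buffer (capped by net.core.wmem_max
 *		// unless the caller uses SO_SNDBUFFORCE with CAP_NET_ADMIN).
 *		val = 1 << 20;
 *		setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
 *	}
 */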
/* Wait for an association to go into ESTABLISHED state.  If timeout is 0,
 * returns immediately with EINPROGRESS.
 */
static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
{
	struct sock *sk = asoc->base.sk;
	int err = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT(wait);

	pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);

	/* Increment the association's refcnt.  */
	sctp_association_hold(asoc);

	for (;;) {
		prepare_to_wait_exclusive(&asoc->wait, &wait,
					  TASK_INTERRUPTIBLE);
		if (!*timeo_p)
			goto do_nonblock;
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
		    asoc->base.dead)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;

		if (sctp_state(asoc, ESTABLISHED))
			break;

		/* Let another process have a go.  Since we are going
		 * to sleep anyway.
		 */
		release_sock(sk);
		current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);

		*timeo_p = current_timeo;
	}

out:
	finish_wait(&asoc->wait, &wait);

	/* Release the association's refcnt.  */
	sctp_association_put(asoc);

	return err;

do_error:
	if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
		err = -ETIMEDOUT;
	else
		err = -ECONNREFUSED;
	goto out;

do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;

do_nonblock:
	err = -EINPROGRESS;
	goto out;
}
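/*
 * Illustrative userspace sketch (not compiled as part of this file): with a
 * zero timeout the wait above returns -EINPROGRESS, which is what a
 * non-blocking connect() on a one-to-one (SOCK_STREAM) SCTP socket reports.
 * Completion follows the usual TCP-like pattern: poll for writability, then
 * read SO_ERROR.  The helper name is ours; error handling is trimmed.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <sys/socket.h>
 *
 *	static int connect_nonblock(int fd, const struct sockaddr *sa,
 *				    socklen_t salen)
 *	{
 *		int err = 0;
 *		socklen_t len = sizeof(err);
 *
 *		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
 *
 *		if (connect(fd, sa, salen) == 0)
 *			return 0;		// established immediately
 *		if (errno != EINPROGRESS)
 *			return -errno;		// hard failure
 *
 *		// Association setup is in progress; wait for the socket to
 *		// become writable, then collect the final status.
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *		poll(&pfd, 1, -1);
 *		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *		return err ? -err : 0;
 *	}
 */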
static int sctp_wait_for_accept(struct sock *sk, long timeo)
{
	struct sctp_endpoint *ep;
	int err = 0;
	DEFINE_WAIT(wait);

	ep = sctp_sk(sk)->ep;

	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);

		if (list_empty(&ep->asocs)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}

		err = -EINVAL;
		if (!sctp_sstate(sk, LISTENING))
			break;

		err = 0;
		if (!list_empty(&ep->asocs))
			break;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = -EAGAIN;
		if (!timeo)
			break;
	}

	finish_wait(sk_sleep(sk), &wait);

	return err;
}
static void sctp_wait_for_close(struct sock *sk, long timeout)
{
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&sctp_sk(sk)->ep->asocs))
			break;
		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);
	} while (!signal_pending(current) && timeout);

	finish_wait(sk_sleep(sk), &wait);
}
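/*
 * Illustrative userspace sketch (not compiled as part of this file): the
 * timeout handed to the wait above comes from the SO_LINGER setting.  A
 * positive l_linger makes close() block for up to that many seconds while
 * the associations complete their graceful shutdown, while l_onoff=1 with
 * l_linger=0 aborts them instead.  The helper name is ours.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static void close_gracefully(int fd)
 *	{
 *		// Block in close() for at most 5 seconds while the
 *		// SHUTDOWN sequence completes.
 *		struct linger lg = { .l_onoff = 1, .l_linger = 5 };
 *
 *		setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *		close(fd);
 *	}
 */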
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
	struct sk_buff *frag;

	if (!skb->data_len)
		goto done;

	/* Don't forget the fragments. */
	skb_walk_frags(skb, frag)
		sctp_skb_set_owner_r_frag(frag, sk);

done:
	sctp_skb_set_owner_r(skb, sk);
}
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
		    struct sctp_association *asoc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_sock *newinet;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;

	newsk->sk_type = sk->sk_type;
	newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	newsk->sk_flags = sk->sk_flags;
	newsk->sk_tsflags = sk->sk_tsflags;
	newsk->sk_no_check_tx = sk->sk_no_check_tx;
	newsk->sk_no_check_rx = sk->sk_no_check_rx;
	newsk->sk_reuse = sk->sk_reuse;
	sctp_sk(newsk)->reuse = sp->reuse;

	newsk->sk_shutdown = sk->sk_shutdown;
	newsk->sk_destruct = sctp_destruct_sock;
	newsk->sk_family = sk->sk_family;
	newsk->sk_protocol = IPPROTO_SCTP;
	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	newsk->sk_sndbuf = sk->sk_sndbuf;
	newsk->sk_rcvbuf = sk->sk_rcvbuf;
	newsk->sk_lingertime = sk->sk_lingertime;
	newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
	newsk->sk_sndtimeo = sk->sk_sndtimeo;
	newsk->sk_rxhash = sk->sk_rxhash;

	newinet = inet_sk(newsk);

	/* Initialize sk's sport, dport, rcv_saddr and daddr for
	 * getsockname() and getpeername()
	 */
	newinet->inet_sport = inet->inet_sport;
	newinet->inet_saddr = inet->inet_saddr;
	newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
	newinet->inet_dport = htons(asoc->peer.port);
	newinet->pmtudisc = inet->pmtudisc;
	newinet->inet_id = asoc->next_tsn ^ jiffies;

	newinet->uc_ttl = inet->uc_ttl;
	newinet->mc_loop = 1;
	newinet->mc_ttl = 1;
	newinet->mc_index = 0;
	newinet->mc_list = NULL;

	if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
		net_enable_timestamp();

	/* Set newsk security attributes from original sk and connection
	 * security attribute from ep.
	 */
	security_sctp_sk_clone(ep, sk, newsk);
}
static inline void sctp_copy_descendant(struct sock *sk_to,
					const struct sock *sk_from)
{
	int ancestor_size = sizeof(struct inet_sock) +
			    sizeof(struct sctp_sock) -
			    offsetof(struct sctp_sock, pd_lobby);

	if (sk_from->sk_family == PF_INET6)
		ancestor_size += sizeof(struct ipv6_pinfo);

	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
}
/* Populate the fields of the newsk from the oldsk and migrate the assoc
 * and its messages to the newsk.
 */
static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			     struct sctp_association *assoc,
			     enum sctp_socket_type type)
{
	struct sctp_sock *oldsp = sctp_sk(oldsk);
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sctp_bind_bucket *pp; /* hash list port iterator */
	struct sctp_endpoint *newep = newsp->ep;
	struct sk_buff *skb, *tmp;
	struct sctp_ulpevent *event;
	struct sctp_bind_hashbucket *head;
	int err;

	/* Migrate socket buffer sizes and all the socket level options to the
	 * new socket.
	 */
	newsk->sk_sndbuf = oldsk->sk_sndbuf;
	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
	/* Brute force copy old sctp opt. */
	sctp_copy_descendant(newsk, oldsk);

	/* Restore the ep value that was overwritten with the above structure
	 * copy.
	 */
	newsp->ep = newep;
	newsp->hmac = NULL;

	/* Hook this new socket in to the bind_hash list. */
	head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
						 inet_sk(oldsk)->inet_num)];
	spin_lock_bh(&head->lock);
	pp = sctp_sk(oldsk)->bind_hash;
	sk_add_bind_node(newsk, &pp->owner);
	sctp_sk(newsk)->bind_hash = pp;
	inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
	spin_unlock_bh(&head->lock);

	/* Copy the bind_addr list from the original endpoint to the new
	 * endpoint so that we can handle restarts properly
	 */
	err = sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
				 &oldsp->ep->base.bind_addr, GFP_KERNEL);
	if (err)
		return err;

	/* New ep's auth_hmacs should be set if old ep's is set, in case
	 * that net->sctp.auth_enable has been changed to 0 by users and
	 * new ep's auth_hmacs couldn't be set in sctp_endpoint_init().
	 */
	if (oldsp->ep->auth_hmacs) {
		err = sctp_auth_init_hmacs(newsp->ep, GFP_KERNEL);
		if (err)
			return err;
	}

	/* Move any messages in the old socket's receive queue that are for the
	 * peeled off association to the new socket's receive queue.
	 */
	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
		event = sctp_skb2event(skb);
		if (event->asoc == assoc) {
			__skb_unlink(skb, &oldsk->sk_receive_queue);
			__skb_queue_tail(&newsk->sk_receive_queue, skb);
			sctp_skb_set_owner_r_frag(skb, newsk);
		}
	}

	/* Clean up any messages pending delivery due to partial
	 * delivery.  Three cases:
	 * 1) No partial delivery; no work.
	 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
	 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
	 */
	atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);

	if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
		struct sk_buff_head *queue;

		/* Decide which queue to move pd_lobby skbs to. */
		if (assoc->ulpq.pd_mode) {
			queue = &newsp->pd_lobby;
		} else {
			queue = &newsk->sk_receive_queue;
		}

		/* Walk through the pd_lobby, looking for skbs that
		 * need moved to the new socket.
		 */
		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
			event = sctp_skb2event(skb);
			if (event->asoc == assoc) {
				__skb_unlink(skb, &oldsp->pd_lobby);
				__skb_queue_tail(queue, skb);
				sctp_skb_set_owner_r_frag(skb, newsk);
			}
		}

		/* Clear up any skbs waiting for the partial
		 * delivery to finish.
		 */
		if (assoc->ulpq.pd_mode)
			sctp_clear_pd(oldsk, NULL);
	}

	sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);

	/* Set the type of socket to indicate that it is peeled off from the
	 * original UDP-style socket or created with the accept() call on a
	 * TCP-style socket.
	 */
	newsp->type = type;

	/* Mark the new socket "in-use" by the user so that any packets
	 * that may arrive on the association after we've moved it are
	 * queued to the backlog.  This prevents a potential race between
	 * backlog processing on the old socket and new-packet processing
	 * on the new socket.
	 *
	 * The caller has just allocated newsk so we can guarantee that other
	 * paths won't try to lock it and then oldsk.
	 */
	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
	sctp_assoc_migrate(assoc, newsk);
	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);

	/* If the association on the newsk is already closed before accept()
	 * is called, set RCV_SHUTDOWN flag.
	 */
	if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
		inet_sk_set_state(newsk, SCTP_SS_CLOSED);
		newsk->sk_shutdown |= RCV_SHUTDOWN;
	} else {
		inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
	}

	release_sock(newsk);

	return 0;
}
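/*
 * Illustrative userspace sketch (not compiled as part of this file): the
 * migration above runs on two userspace paths, sctp_peeloff() on a
 * one-to-many socket and accept() on a listening one-to-one socket.
 * sctp_peeloff() here is the lksctp-tools wrapper around the
 * SCTP_SOCKOPT_PEELOFF socket option; the association id would come from a
 * prior SCTP_ASSOC_CHANGE notification or sndrcvinfo.  Helper names are
 * ours and error handling is trimmed.
 *
 *	#include <netinet/sctp.h>
 *	#include <sys/socket.h>
 *
 *	// Detach one association from a one-to-many socket into its own
 *	// one-to-one style fd; queued data for it moves to the new socket.
 *	static int detach_assoc(int seqpacket_fd, sctp_assoc_t assoc_id)
 *	{
 *		return sctp_peeloff(seqpacket_fd, assoc_id);
 *	}
 *
 *	// For a TCP-style listener, plain accept() yields the new socket.
 *	static int accept_assoc(int stream_fd)
 *	{
 *		return accept(stream_fd, NULL, NULL);
 *	}
 */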
/* This proto struct describes the ULP interface for SCTP.  */
struct proto sctp_prot = {
	.name		= "SCTP",
	.owner		= THIS_MODULE,
	.close		= sctp_close,
	.disconnect	= sctp_disconnect,
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
	.destroy	= sctp_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
	.sendmsg	= sctp_sendmsg,
	.recvmsg	= sctp_recvmsg,
	.bind		= sctp_bind,
	.backlog_rcv	= sctp_backlog_rcv,
	.hash		= sctp_hash,
	.unhash		= sctp_unhash,
	.get_port	= sctp_get_port,
	.obj_size	= sizeof(struct sctp_sock),
	.useroffset	= offsetof(struct sctp_sock, subscribe),
	.usersize	= offsetof(struct sctp_sock, initmsg) -
				offsetof(struct sctp_sock, subscribe) +
				sizeof_field(struct sctp_sock, initmsg),
	.sysctl_mem	= sysctl_sctp_mem,
	.sysctl_rmem	= sysctl_sctp_rmem,
	.sysctl_wmem	= sysctl_sctp_wmem,
	.memory_pressure = &sctp_memory_pressure,
	.enter_memory_pressure = sctp_enter_memory_pressure,
	.memory_allocated = &sctp_memory_allocated,
	.sockets_allocated = &sctp_sockets_allocated,
};
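/*
 * Illustrative userspace sketch (not compiled as part of this file): the
 * proto above backs both SCTP socket styles on IPv4.  SOCK_SEQPACKET gives
 * the one-to-many (UDP-style) socket, SOCK_STREAM the one-to-one (TCP-style)
 * socket; both use IPPROTO_SCTP.  The helper name is ours.
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	static int make_sctp_sockets(int *one_to_many, int *one_to_one)
 *	{
 *		*one_to_many = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *		*one_to_one  = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *		return (*one_to_many < 0 || *one_to_one < 0) ? -1 : 0;
 *	}
 */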
#if IS_ENABLED(CONFIG_IPV6)

#include <net/transp_v6.h>
static void sctp_v6_destroy_sock(struct sock *sk)
{
	sctp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

struct proto sctpv6_prot = {
	.name		= "SCTPv6",
	.owner		= THIS_MODULE,
	.close		= sctp_close,
	.disconnect	= sctp_disconnect,
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
	.destroy	= sctp_v6_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
	.sendmsg	= sctp_sendmsg,
	.recvmsg	= sctp_recvmsg,
	.bind		= sctp_bind,
	.backlog_rcv	= sctp_backlog_rcv,
	.hash		= sctp_hash,
	.unhash		= sctp_unhash,
	.get_port	= sctp_get_port,
	.obj_size	= sizeof(struct sctp6_sock),
	.useroffset	= offsetof(struct sctp6_sock, sctp.subscribe),
	.usersize	= offsetof(struct sctp6_sock, sctp.initmsg) -
				offsetof(struct sctp6_sock, sctp.subscribe) +
				sizeof_field(struct sctp6_sock, sctp.initmsg),
	.sysctl_mem	= sysctl_sctp_mem,
	.sysctl_rmem	= sysctl_sctp_rmem,
	.sysctl_wmem	= sysctl_sctp_wmem,
	.memory_pressure = &sctp_memory_pressure,
	.enter_memory_pressure = sctp_enter_memory_pressure,
	.memory_allocated = &sctp_memory_allocated,
	.sockets_allocated = &sctp_sockets_allocated,
};

#endif /* IS_ENABLED(CONFIG_IPV6) */