/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal     <narsi@refcode.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez  <inaky.gonzalez@intel.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
 *    Kevin Gao             <kevin.gao@intel.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/crypto.h>

#include <net/route.h>
#include <net/inet_common.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* WARNING: Please do not remove the SCTP_STATIC attribute from
 * any of the functions below as they are used to export functions
 * used by a project regression testsuite.
 */
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *, struct sock *,
			      struct sctp_association *, sctp_socket_type_t);
static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
extern struct kmem_cache *sctp_bucket_cachep;
extern int sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];

static int sctp_memory_pressure;
static atomic_t sctp_memory_allocated;
static atomic_t sctp_sockets_allocated;

static void sctp_enter_memory_pressure(void)
{
	sctp_memory_pressure = 1;
}
/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	int amt;

	if (asoc->ep->sndbuf_policy)
		amt = asoc->sndbuf_used;
	else
		amt = atomic_read(&asoc->base.sk->sk_wmem_alloc);

	if (amt >= asoc->base.sk->sk_sndbuf) {
		if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
			amt = 0;
		else
			amt = sk_stream_wspace(asoc->base.sk);
	} else {
		amt = asoc->base.sk->sk_sndbuf - amt;
	}

	return amt;
}
/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;

	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
				sizeof(struct sk_buff) +
				sizeof(struct sctp_chunk);

	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	sk->sk_wmem_queued += chunk->skb->truesize;
	sk_mem_charge(sk, chunk->skb->truesize);
}
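/* The destructor installed above has to undo exactly this accounting when
 * the skb is eventually freed.  The real sctp_wfree() (declared near the top
 * of this file) is defined elsewhere in this file; the following is only an
 * illustrative sketch of the bookkeeping it must reverse, using the same
 * fields as sctp_set_owner_w() above (the name sctp_wfree_sketch is made up):
 *
 *	static void sctp_wfree_sketch(struct sk_buff *skb)
 *	{
 *		struct sctp_chunk *chunk = *((struct sctp_chunk **)(skb->cb));
 *		struct sctp_association *asoc = chunk->asoc;
 *		struct sock *sk = asoc->base.sk;
 *
 *		asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
 *					sizeof(struct sk_buff) +
 *					sizeof(struct sctp_chunk);
 *		atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
 *		sk->sk_wmem_queued -= skb->truesize;
 *		sk_mem_uncharge(sk, skb->truesize);
 *
 *		sock_wfree(skb);
 *		sctp_association_put(asoc);
 *	}
 */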
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}
/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (!id || (id == (sctp_assoc_t)-1))
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	spin_unlock_bh(&sctp_assocs_id_lock);

	if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
		return NULL;

	return asoc;
}
/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_transport *transport;
	union sctp_addr *laddr = (union sctp_addr *)addr;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr, &transport);
	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}
/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd      - the socket descriptor returned by socket().
 *   addr    - the address structure (struct sockaddr_in or struct
 *             sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
SCTP_STATIC int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	sctp_lock_sock(sk);

	SCTP_DEBUG_PRINTK("sctp_bind(sk: %p, addr: %p, addr_len: %d)\n",
			  sk, addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	sctp_release_sock(sk);

	return retval;
}
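/* A minimal user-space sketch of the UDP-style bind() described above.
 * Purely illustrative: error handling is omitted and the port number is an
 * arbitrary example value.
 *
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	int example_bind(void)
 *	{
 *		struct sockaddr_in addr;
 *		int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sin_family = AF_INET;
 *		addr.sin_port = htons(5000);
 *		addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *
 *		return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 *	}
 */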
static long sctp_get_port_local(struct sock *, union sctp_addr *);
/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof (struct sockaddr))
		return NULL;

	/* Does this PF support this AF? */
	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
		return NULL;

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}
/* Bind a local address either to an endpoint or to an association.  */
SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		SCTP_DEBUG_PRINTK("sctp_do_bind(sk: %p, newaddr: %p, len: %d) EINVAL\n",
				  sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	SCTP_DEBUG_PRINTK_IPADDR("sctp_do_bind(sk: %p, new addr: ",
				 ", port: %d, new port: %d, len: %d)\n",
				 sk, addr, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			SCTP_DEBUG_PRINTK("sctp_do_bind:"
					  " New port %d does not match existing port "
					  "%d.\n", snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		if (ret == (long) sk) {
			/* This endpoint has a conflicting address. */
			return -EINVAL;
		} else {
			return -EADDRINUSE;
		}
	}

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->sport = htons(inet_sk(sk)->num);
		af->to_sk_saddr(addr, sk);
	}

	return ret;
}
/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF. Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}
/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf = addrs;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	SCTP_DEBUG_PRINTK("sctp_bindx_add (sk: %p, addrs: %p, addrcnt: %d)\n",
			  sk, addrs, addrcnt);

	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = (struct sockaddr *)addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			break;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);
		if (retval < 0)
			break;

		addr_buf += af->sockaddr_len;
	}

	if (retval < 0) {
		/* Failed. Cleanup the ones that have been added. */
		if (cnt > 0)
			sctp_bindx_rem(sk, addrs, cnt);
	}

	return retval;
}
/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock		*sk,
				   struct sockaddr	*addrs,
				   int			addrcnt)
{
	struct sctp_sock		*sp;
	struct sctp_endpoint		*ep;
	struct sctp_association	*asoc;
	struct sctp_bind_addr		*bp;
	struct sctp_chunk		*chunk;
	struct sctp_sockaddr_entry	*laddr;
	union sctp_addr			*addr;
	union sctp_addr			saveaddr;
	void				*addr_buf;
	struct sctp_af			*af;
	struct list_head		*p;
	int				i;
	int				retval = 0;

	if (!sctp_addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
			  __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = (union sctp_addr *)addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		retval = sctp_send_asconf(asoc, chunk);
		if (retval)
			goto out;

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = (union sctp_addr *)addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
	}

out:
	return retval;
}
/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf = addrs;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n",
			  sk, addrs, addrcnt);

	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			break;
		}

		sa_addr = (union sctp_addr *)addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			break;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			break;
		}

		if (sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			break;
		}

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);
		if (retval < 0)
			break;

		addr_buf += af->sockaddr_len;
	}

	if (retval < 0) {
		/* Failed. Add the ones that have been removed back. */
		if (cnt > 0)
			sctp_bindx_add(sk, addrs, cnt);
	}

	return retval;
}
/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is not present in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock		*sk,
				   struct sockaddr	*addrs,
				   int			addrcnt)
{
	struct sctp_sock	*sp;
	struct sctp_endpoint	*ep;
	struct sctp_association *asoc;
	struct sctp_transport	*transport;
	struct sctp_bind_addr	*bp;
	struct sctp_chunk	*chunk;
	union sctp_addr		*laddr;
	void			*addr_buf;
	struct sctp_af		*af;
	struct sctp_sockaddr_entry *saddr;
	int			i;
	int			retval = 0;

	if (!sctp_addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n",
			  __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = (union sctp_addr *)addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if (!laddr)
			continue;

		/* We do not need RCU protection throughout this loop
		 * because this is done under a socket lock from the
		 * setsockopt call.
		 */
		chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
						   SCTP_PARAM_DEL_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Reset use_as_src flag for the addresses in the bind address
		 * list that are to be deleted.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = (union sctp_addr *)addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			list_for_each_entry(saddr, &bp->address_list, list) {
				if (sctp_cmp_addr_exact(&saddr->a, laddr))
					saddr->state = SCTP_ADDR_DEL;
			}
			addr_buf += af->sockaddr_len;
		}

		/* Update the route and saddr entries for all the transports
		 * as some of the addresses in the bind address list are
		 * about to be deleted and cannot be used as source addresses.
		 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
				    transports) {
			dst_release(transport->dst);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind().  Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with so that no new association accepted will be
 * associated with those addresses. If the endpoint supports dynamic
 * addresses, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause an
 * endpoint to send the appropriate message to the peer to change the
 * peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through
 * sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(); sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 * op        Operation to perform (add or remove, see the flags of
 *           sctp_bindx())
 *
 * Returns 0 if ok, <0 errno code on error.
 */
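/* A user-space sketch of the sctp_bindx() call documented above, showing how
 * a "packed array" of addresses is laid out.  Illustrative only: it assumes
 * libsctp's <netinet/sctp.h>, uses arbitrary example addresses and port, and
 * omits error handling.
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <string.h>
 *
 *	int example_bindx_add(int sd)
 *	{
 *		struct sockaddr_in addrs[2];
 *		int i;
 *
 *		memset(addrs, 0, sizeof(addrs));
 *		for (i = 0; i < 2; i++) {
 *			addrs[i].sin_family = AF_INET;
 *			addrs[i].sin_port = htons(5000);
 *		}
 *		inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
 *		inet_pton(AF_INET, "198.51.100.1", &addrs[1].sin_addr);
 *
 *		return sctp_bindx(sd, (struct sockaddr *)addrs, 2,
 *				  SCTP_BINDX_ADD_ADDR);
 *	}
 */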
SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
				      struct sockaddr __user *addrs,
				      int addrs_size, int op)
{
	struct sockaddr *kaddrs;
	int err;
	int addrcnt = 0;
	int walk_size = 0;
	struct sockaddr *sa_addr;
	void *addr_buf;
	struct sctp_af *af;

	SCTP_DEBUG_PRINTK("sctp_setsockopt_bindx: sk %p addrs %p"
			  " addrs_size %d opt %d\n", sk, addrs, addrs_size, op);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		kfree(kaddrs);
		return -EFAULT;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		sa_addr = (struct sockaddr *)addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			kfree(kaddrs);
			return -EINVAL;
		}
		addrcnt++;
		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	/* Do the work. */
	switch (op) {
	case SCTP_BINDX_ADD_ADDR:
		err = sctp_bindx_add(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
		break;

	case SCTP_BINDX_REM_ADDR:
		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
		if (err)
			goto out;
		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
		break;

	default:
		err = -EINVAL;
		break;
	}

out:
	kfree(kaddrs);

	return err;
}
/* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
 *
 * Common routine for handling connect() and sctp_connectx().
 * Connect will come in with just a single address.
 */
static int __sctp_connect(struct sock* sk,
			  struct sockaddr *kaddrs,
			  int addrs_size)
{
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc = NULL;
	struct sctp_association *asoc2;
	struct sctp_transport *transport;
	union sctp_addr to;
	struct sctp_af *af;
	sctp_scope_t scope;
	long timeo;
	int err = 0;
	int walk_size = 0;
	union sctp_addr *sa_addr = NULL;
	void *addr_buf;
	unsigned short port;
	unsigned int f_flags = 0;

	sp = sctp_sk(sk);
	ep = sp->ep;

	/* connect() cannot be done on a socket that is already in ESTABLISHED
	 * state - UDP-style peeled off socket or a TCP-style socket that
	 * is already connected.
	 * It cannot be done even on a TCP-style listening socket.
	 */
	if (sctp_sstate(sk, ESTABLISHED) ||
	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
		err = -EISCONN;
		goto out_free;
	}

	/* Walk through the addrs buffer and count the number of addresses. */
	addr_buf = kaddrs;
	while (walk_size < addrs_size) {
		sa_addr = (union sctp_addr *)addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		port = ntohs(sa_addr->v4.sin_port);

		/* If the address family is not supported or if this address
		 * causes the address buffer to overflow return EINVAL.
		 */
		if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
			err = -EINVAL;
			goto out_free;
		}

		/* Save current address so we can work with it */
		memcpy(&to, sa_addr, af->sockaddr_len);

		err = sctp_verify_addr(sk, &to, af->sockaddr_len);
		if (err)
			goto out_free;

		/* Make sure the destination port is correctly set
		 * in all addresses.
		 */
		if (asoc && asoc->peer.port && asoc->peer.port != port) {
			err = -EINVAL;
			goto out_free;
		}

		/* Check if there already is a matching association on the
		 * endpoint (other than the one created here).
		 */
		asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (asoc2 && asoc2 != asoc) {
			if (asoc2->state >= SCTP_STATE_ESTABLISHED)
				err = -EISCONN;
			else
				err = -EALREADY;
			goto out_free;
		}

		/* If we could not find a matching association on the endpoint,
		 * make sure that there is no peeled-off association matching
		 * the peer address even on another socket.
		 */
		if (sctp_endpoint_is_peeled_off(ep, &to)) {
			err = -EADDRNOTAVAIL;
			goto out_free;
		}

		if (!asoc) {
			/* If a bind() or sctp_bindx() is not called prior to
			 * an sctp_connectx() call, the system picks an
			 * ephemeral port and will choose an address set
			 * equivalent to binding with a wildcard address.
			 */
			if (!ep->base.bind_addr.port) {
				if (sctp_autobind(sk)) {
					err = -EAGAIN;
					goto out_free;
				}
			} else {
				/*
				 * If an unprivileged user inherits a 1-many
				 * style socket with open associations on a
				 * privileged port, it MAY be permitted to
				 * accept new associations, but it SHOULD NOT
				 * be permitted to open new associations.
				 */
				if (ep->base.bind_addr.port < PROT_SOCK &&
				    !capable(CAP_NET_BIND_SERVICE)) {
					err = -EACCES;
					goto out_free;
				}
			}

			scope = sctp_scope(&to);
			asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
			if (!asoc) {
				err = -ENOMEM;
				goto out_free;
			}
		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
						SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}

		addr_buf += af->sockaddr_len;
		walk_size += af->sockaddr_len;
	}

	err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
	if (err < 0)
		goto out_free;

	err = sctp_primitive_ASSOCIATE(asoc, NULL);
	if (err < 0)
		goto out_free;

	/* Initialize sk's dport and daddr for getpeername() */
	inet_sk(sk)->dport = htons(asoc->peer.port);
	af = sctp_get_af_specific(sa_addr->sa.sa_family);
	af->to_sk_daddr(sa_addr, sk);

	/* in-kernel sockets don't generally have a file allocated to them
	 * if all they do is call sock_create_kern().
	 */
	if (sk->sk_socket->file)
		f_flags = sk->sk_socket->file->f_flags;

	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);

	err = sctp_wait_for_connect(asoc, &timeo);

	/* Don't free association on exit. */
	asoc = NULL;

out_free:
	SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
			  " kaddrs: %p err: %d\n",
			  asoc, kaddrs, err);
	if (asoc)
		sctp_association_free(asoc);

	return err;
}
/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. On failure, sctp_connectx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed. Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached. The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent. This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association. It does not necessarily equal the set of addresses
 * the peer uses for the resulting association. If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copy the addresses from user to kernel
 * land and invoke __sctp_connect(). This is used for tunneling the
 * sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel. Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(); sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns 0 if ok, <0 errno code on error.
 */
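/* A user-space sketch of sctp_connectx() as documented above, again using a
 * packed array of peer addresses.  Illustrative only: addresses and port are
 * arbitrary examples, error handling is omitted, and newer libsctp versions
 * add a fourth sctp_assoc_t * argument to sctp_connectx().
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <string.h>
 *
 *	int example_connectx(int sd)
 *	{
 *		struct sockaddr_in peers[2];
 *		int i;
 *
 *		memset(peers, 0, sizeof(peers));
 *		for (i = 0; i < 2; i++) {
 *			peers[i].sin_family = AF_INET;
 *			peers[i].sin_port = htons(5000);
 *		}
 *		inet_pton(AF_INET, "192.0.2.10", &peers[0].sin_addr);
 *		inet_pton(AF_INET, "198.51.100.10", &peers[1].sin_addr);
 *
 *		return sctp_connectx(sd, (struct sockaddr *)peers, 2);
 *	}
 */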
SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
					 struct sockaddr __user *addrs,
					 int addrs_size)
{
	int err = 0;
	struct sockaddr *kaddrs;

	SCTP_DEBUG_PRINTK("%s - sk %p addrs %p addrs_size %d\n",
			  __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer. */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size);
	}

	kfree(kaddrs);
	return err;
}
/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
 *
 * After an application calls close() on a socket descriptor, no further
 * socket operations will succeed on that descriptor.
 *
 * API 7.1.4 SO_LINGER
 *
 * An application using the TCP-style socket can use this option to
 * perform the SCTP ABORT primitive.  The linger option structure is:
 *
 *  struct  linger {
 *     int     l_onoff;                // option on/off
 *     int     l_linger;               // linger time
 *  };
 *
 * To enable the option, set l_onoff to 1.  If the l_linger value is set
 * to 0, calling close() is the same as the ABORT primitive.  If the
 * value is set to a negative value, the setsockopt() call will return
 * an error.  If the value is set to a positive value linger_time, the
 * close() can be blocked for at most linger_time ms.  If the graceful
 * shutdown phase does not finish during this period, close() will
 * return but the graceful shutdown phase continues in the system.
 */
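/* A user-space sketch of the SO_LINGER/ABORT behaviour described above: with
 * l_onoff = 1 and l_linger = 0, close() aborts the association instead of
 * performing a graceful shutdown.  Illustrative only; error handling omitted.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	void example_abort_on_close(int sd)
 *	{
 *		struct linger ling = { .l_onoff = 1, .l_linger = 0 };
 *
 *		setsockopt(sd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
 *		close(sd);	// sends ABORT rather than SHUTDOWN
 *	}
 */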
SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
{
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct list_head *pos, *temp;

	SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);

	sctp_lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	ep = sctp_sk(sk)->ep;

	/* Walk all associations on an endpoint.  */
	list_for_each_safe(pos, temp, &ep->asocs) {
		asoc = list_entry(pos, struct sctp_association, asocs);

		if (sctp_style(sk, TCP)) {
			/* A closed association can still be in the list if
			 * it belongs to a TCP-style listening socket that is
			 * not yet accepted. If so, free it. If not, send an
			 * ABORT or SHUTDOWN based on the linger options.
			 */
			if (sctp_state(asoc, CLOSED)) {
				sctp_unhash_established(asoc);
				sctp_association_free(asoc);
				continue;
			}
		}

		if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			if (chunk)
				sctp_primitive_ABORT(asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(asoc, NULL);
	}

	/* Clean up any skbs sitting on the receive queue.  */
	sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
	sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);

	/* On a TCP-style socket, block for at most linger_time if set. */
	if (sctp_style(sk, TCP) && timeout)
		sctp_wait_for_close(sk, timeout);

	/* This will run the backlog queue.  */
	sctp_release_sock(sk);

	/* Supposedly, no process has access to the socket, but
	 * the net layers still may.
	 */
	sctp_local_bh_disable();
	sctp_bh_lock_sock(sk);

	/* Hold the sock, since sk_common_release() will put sock_put()
	 * and we have just a little more cleanup.
	 */
	sock_hold(sk);
	sk_common_release(sk);

	sctp_bh_unlock_sock(sk);
	sctp_local_bh_enable();

	sock_put(sk);

	SCTP_DBG_OBJCNT_DEC(sock);
}
/* Handle EPIPE error. */
static int sctp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}
/* API 3.1.3 sendmsg() - UDP Style Syntax
 *
 * An application uses sendmsg() and recvmsg() calls to transmit data to
 * and receive data from its peer.
 *
 *  ssize_t sendmsg(int socket, const struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 *
 * Note:  This function could use a rewrite especially when explicit
 * connect support comes in.
 */
/* BUG:  We do not implement the equivalent of sk_stream_wait_memory(). */
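/* A user-space sketch of the UDP-style sendmsg() described above, attaching a
 * struct sctp_sndrcvinfo as SCTP_SNDRCV ancillary data to pick the stream.
 * Illustrative only: destination setup and error handling are omitted and the
 * stream number is an arbitrary example.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	ssize_t example_send(int sd, struct sockaddr *to, socklen_t tolen,
 *			     char *data, size_t len)
 *	{
 *		char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *		struct sctp_sndrcvinfo *sinfo;
 *		struct cmsghdr *cmsg;
 *		struct iovec iov = { data, len };
 *		struct msghdr msg = { 0 };
 *
 *		memset(cbuf, 0, sizeof(cbuf));
 *		msg.msg_name = to;
 *		msg.msg_namelen = tolen;
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = IPPROTO_SCTP;
 *		cmsg->cmsg_type = SCTP_SNDRCV;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *		sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *		memset(sinfo, 0, sizeof(*sinfo));
 *		sinfo->sinfo_stream = 1;
 *
 *		return sendmsg(sd, &msg, 0);
 *	}
 */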
SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
			     struct msghdr *msg, size_t msg_len)
{
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *new_asoc = NULL, *asoc = NULL;
	struct sctp_transport *transport, *chunk_tp;
	struct sctp_chunk *chunk;
	union sctp_addr to;
	struct sockaddr *msg_name = NULL;
	struct sctp_sndrcvinfo default_sinfo = { 0 };
	struct sctp_sndrcvinfo *sinfo;
	struct sctp_initmsg *sinit;
	sctp_assoc_t associd = 0;
	sctp_cmsgs_t cmsgs = { NULL };
	int err;
	sctp_scope_t scope;
	long timeo;
	__u16 sinfo_flags = 0;
	struct sctp_datamsg *datamsg;
	int msg_flags = msg->msg_flags;

	SCTP_DEBUG_PRINTK("sctp_sendmsg(sk: %p, msg: %p, msg_len: %zu)\n",
			  sk, msg, msg_len);

	err = 0;
	sp = sctp_sk(sk);
	ep = sp->ep;

	SCTP_DEBUG_PRINTK("Using endpoint: %p.\n", ep);

	/* We cannot send a message over a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
		err = -EPIPE;
		goto out_nounlock;
	}

	/* Parse out the SCTP CMSGs.  */
	err = sctp_msghdr_parse(msg, &cmsgs);

	if (err) {
		SCTP_DEBUG_PRINTK("msghdr parse err = %x\n", err);
		goto out_nounlock;
	}

	/* Fetch the destination address for this packet.  This
	 * address only selects the association--it is not necessarily
	 * the address we will send to.
	 * For a peeled-off socket, msg_name is ignored.
	 */
	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
		int msg_namelen = msg->msg_namelen;

		err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
				       msg_namelen);
		if (err)
			return err;

		if (msg_namelen > sizeof(to))
			msg_namelen = sizeof(to);
		memcpy(&to, msg->msg_name, msg_namelen);
		msg_name = msg->msg_name;
	}

	sinfo = cmsgs.info;
	sinit = cmsgs.init;

	/* Did the user specify SNDRCVINFO?  */
	if (sinfo) {
		sinfo_flags = sinfo->sinfo_flags;
		associd = sinfo->sinfo_assoc_id;
	}

	SCTP_DEBUG_PRINTK("msg_len: %zu, sinfo_flags: 0x%x\n",
			  msg_len, sinfo_flags);

	/* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
	if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_EOF is set, no data can be sent. Disallow sending zero
	 * length messages when SCTP_EOF|SCTP_ABORT is not set.
	 * If SCTP_ABORT is set, the message length could be non zero with
	 * the msg_iov set to the user abort reason.
	 */
	if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
	    (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
		err = -EINVAL;
		goto out_nounlock;
	}

	/* If SCTP_ADDR_OVER is set, there must be an address
	 * specified in msg_name.
	 */
	if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
		err = -EINVAL;
		goto out_nounlock;
	}

	transport = NULL;

	SCTP_DEBUG_PRINTK("About to look up association.\n");

	sctp_lock_sock(sk);

	/* If a msg_name has been specified, assume this is to be used.  */
	if (msg_name) {
		/* Look for a matching association on the endpoint. */
		asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
		if (!asoc) {
			/* If we could not find a matching association on the
			 * endpoint, make sure that it is not a TCP-style
			 * socket that already has an association or there is
			 * no peeled-off association on another socket.
			 */
			if ((sctp_style(sk, TCP) &&
			     sctp_sstate(sk, ESTABLISHED)) ||
			    sctp_endpoint_is_peeled_off(ep, &to)) {
				err = -EADDRNOTAVAIL;
				goto out_unlock;
			}
		}
	} else {
		asoc = sctp_id2assoc(sk, associd);
		if (!asoc) {
			err = -EPIPE;
			goto out_unlock;
		}
	}

	if (asoc) {
		SCTP_DEBUG_PRINTK("Just looked up association: %p.\n", asoc);

		/* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
		 * socket that has an association in CLOSED state. This can
		 * happen when an accepted socket has an association that is
		 * already CLOSED.
		 */
		if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
			err = -EPIPE;
			goto out_unlock;
		}

		if (sinfo_flags & SCTP_EOF) {
			SCTP_DEBUG_PRINTK("Shutting down association: %p\n",
					  asoc);
			sctp_primitive_SHUTDOWN(asoc, NULL);
			err = 0;
			goto out_unlock;
		}
		if (sinfo_flags & SCTP_ABORT) {

			chunk = sctp_make_abort_user(asoc, msg, msg_len);
			if (!chunk) {
				err = -ENOMEM;
				goto out_unlock;
			}

			SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
			sctp_primitive_ABORT(asoc, chunk);
			err = 0;
			goto out_unlock;
		}
	}

	/* Do we need to create the association?  */
	if (!asoc) {
		SCTP_DEBUG_PRINTK("There is no association yet.\n");

		if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
			err = -EINVAL;
			goto out_unlock;
		}

		/* Check for invalid stream against the stream counts,
		 * either the default or the user specified stream counts.
		 */
		if (sinfo) {
			if (!sinit || (sinit && !sinit->sinit_num_ostreams)) {
				/* Check against the defaults. */
				if (sinfo->sinfo_stream >=
				    sp->initmsg.sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			} else {
				/* Check against the requested.  */
				if (sinfo->sinfo_stream >=
				    sinit->sinit_num_ostreams) {
					err = -EINVAL;
					goto out_unlock;
				}
			}
		}

		/*
		 * API 3.1.2 bind() - UDP Style Syntax
		 * If a bind() or sctp_bindx() is not called prior to a
		 * sendmsg() call that initiates a new association, the
		 * system picks an ephemeral port and will choose an address
		 * set equivalent to binding with a wildcard address.
		 */
		if (!ep->base.bind_addr.port) {
			if (sctp_autobind(sk)) {
				err = -EAGAIN;
				goto out_unlock;
			}
		} else {
			/*
			 * If an unprivileged user inherits a one-to-many
			 * style socket with open associations on a privileged
			 * port, it MAY be permitted to accept new associations,
			 * but it SHOULD NOT be permitted to open new
			 * associations.
			 */
			if (ep->base.bind_addr.port < PROT_SOCK &&
			    !capable(CAP_NET_BIND_SERVICE)) {
				err = -EACCES;
				goto out_unlock;
			}
		}

		scope = sctp_scope(&to);
		new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
		if (!new_asoc) {
			err = -ENOMEM;
			goto out_unlock;
		}
		asoc = new_asoc;

		/* If the SCTP_INIT ancillary data is specified, set all
		 * the association init values accordingly.
		 */
		if (sinit) {
			if (sinit->sinit_num_ostreams) {
				asoc->c.sinit_num_ostreams =
					sinit->sinit_num_ostreams;
			}
			if (sinit->sinit_max_instreams) {
				asoc->c.sinit_max_instreams =
					sinit->sinit_max_instreams;
			}
			if (sinit->sinit_max_attempts) {
				asoc->max_init_attempts
					= sinit->sinit_max_attempts;
			}
			if (sinit->sinit_max_init_timeo) {
				asoc->max_init_timeo =
				 msecs_to_jiffies(sinit->sinit_max_init_timeo);
			}
		}

		/* Prime the peer's transport structures.  */
		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
		if (!transport) {
			err = -ENOMEM;
			goto out_free;
		}
		err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
		if (err < 0) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	/* ASSERT: we have a valid association at this point.  */
	SCTP_DEBUG_PRINTK("We have a valid association.\n");

	if (!sinfo) {
		/* If the user didn't specify SNDRCVINFO, make up one with
		 * some defaults.
		 */
		default_sinfo.sinfo_stream = asoc->default_stream;
		default_sinfo.sinfo_flags = asoc->default_flags;
		default_sinfo.sinfo_ppid = asoc->default_ppid;
		default_sinfo.sinfo_context = asoc->default_context;
		default_sinfo.sinfo_timetolive = asoc->default_timetolive;
		default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
		sinfo = &default_sinfo;
	}

	/* API 7.1.7, the sndbuf size per association bounds the
	 * maximum size of data that can be sent in a single send call.
	 */
	if (msg_len > sk->sk_sndbuf) {
		err = -EMSGSIZE;
		goto out_free;
	}

	if (asoc->pmtu_pending)
		sctp_assoc_pending_pmtu(asoc);

	/* If fragmentation is disabled and the message length exceeds the
	 * association fragmentation point, return EMSGSIZE.  The I-D
	 * does not specify what this error is, but this looks like
	 * a great fit.
	 */
	if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
		err = -EMSGSIZE;
		goto out_free;
	}

	/* Check for invalid stream. */
	if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
		err = -EINVAL;
		goto out_free;
	}

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	if (!sctp_wspace(asoc)) {
		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
		if (err)
			goto out_free;
	}

	/* If an address is passed with the sendto/sendmsg call, it is used
	 * to override the primary destination address in the TCP model, or
	 * when SCTP_ADDR_OVER flag is set in the UDP model.
	 */
	if ((sctp_style(sk, TCP) && msg_name) ||
	    (sinfo_flags & SCTP_ADDR_OVER)) {
		chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
		if (!chunk_tp) {
			err = -EINVAL;
			goto out_free;
		}
	} else
		chunk_tp = NULL;

	/* Auto-connect, if we aren't connected already. */
	if (sctp_state(asoc, CLOSED)) {
		err = sctp_primitive_ASSOCIATE(asoc, NULL);
		if (err < 0)
			goto out_free;
		SCTP_DEBUG_PRINTK("We associated primitively.\n");
	}

	/* Break the message into multiple chunks of maximum size. */
	datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
	if (!datamsg) {
		err = -ENOMEM;
		goto out_free;
	}

	/* Now send the (possibly) fragmented message. */
	list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
		sctp_chunk_hold(chunk);

		/* Do accounting for the write space.  */
		sctp_set_owner_w(chunk);

		chunk->transport = chunk_tp;

		/* Send it to the lower layers.  Note:  all chunks
		 * must either fail or succeed.   The lower layer
		 * works that way today.  Keep it that way or this
		 * breaks.
		 */
		err = sctp_primitive_SEND(asoc, chunk);
		/* Did the lower layer accept the chunk? */
		if (err)
			sctp_chunk_free(chunk);
		SCTP_DEBUG_PRINTK("We sent primitively.\n");
	}

	sctp_datamsg_put(datamsg);
	if (err)
		goto out_free;
	else
		err = msg_len;

	/* If we are already past ASSOCIATE, the lower
	 * layers are responsible for association cleanup.
	 */
	goto out_unlock;

out_free:
	if (new_asoc)
		sctp_association_free(asoc);
out_unlock:
	sctp_release_sock(sk);

out_nounlock:
	return sctp_error(sk, msg_flags, err);

#if 0
do_sock_err:
	if (msg_len)
		err = msg_len;
	else
		err = sock_error(sk);
	goto out;

do_interrupted:
	if (msg_len)
		err = msg_len;
	goto out;
#endif
}
/* This is an extended version of skb_pull() that removes the data from the
 * start of a skb even when data is spread across the list of skb's in the
 * frag_list. len specifies the total amount of data that needs to be removed.
 * When 'len' bytes could be removed from the skb, it returns 0.
 * If 'len' exceeds the total skb length, it returns the no. of bytes that
 * could not be removed.
 */
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
	struct sk_buff *list;
	int skb_len = skb_headlen(skb);
	int rlen;

	if (len <= skb_len) {
		__skb_pull(skb, len);
		return 0;
	}
	len -= skb_len;
	__skb_pull(skb, skb_len);

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
		rlen = sctp_skb_pull(list, len);
		skb->len -= (len-rlen);
		skb->data_len -= (len-rlen);

		if (!rlen)
			return 0;

		len = rlen;
	}

	return len;
}
/* API 3.1.3  recvmsg() - UDP Style Syntax
 *
 *  ssize_t recvmsg(int socket, struct msghdr *message,
 *                  int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 */
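/* A user-space sketch of the recvmsg() usage described above, checking
 * MSG_NOTIFICATION to tell event notifications apart from user data.
 * Illustrative only; error handling is minimal.
 *
 *	#include <netinet/sctp.h>
 *	#include <sys/socket.h>
 *
 *	ssize_t example_recv(int sd, char *buf, size_t len, int *is_notification)
 *	{
 *		struct iovec iov = { buf, len };
 *		struct msghdr msg = { 0 };
 *		ssize_t n;
 *
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *
 *		n = recvmsg(sd, &msg, 0);
 *		*is_notification = (n >= 0) &&
 *				   (msg.msg_flags & MSG_NOTIFICATION);
 *		return n;
 *	}
 */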
static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);

SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
			     struct msghdr *msg, size_t len, int noblock,
			     int flags, int *addr_len)
{
	struct sctp_ulpevent *event = NULL;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff *skb;
	int copied;
	int err = 0;
	int skb_len;

	SCTP_DEBUG_PRINTK("sctp_recvmsg(%s: %p, %s: %p, %s: %zd, %s: %d, %s: "
			  "0x%x, %s: %p)\n", "sk", sk, "msghdr", msg,
			  "len", len, "knoblauch", noblock,
			  "flags", flags, "addr_len", addr_len);

	sctp_lock_sock(sk);

	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) {
		err = -ENOTCONN;
		goto out;
	}

	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Get the total length of the skb including any skb's in the
	 * frag_list.
	 */
	skb_len = skb->len;

	copied = skb_len;
	if (copied > len)
		copied = len;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	event = sctp_skb2event(skb);

	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);
	if (sctp_ulpevent_is_notification(event)) {
		msg->msg_flags |= MSG_NOTIFICATION;
		sp->pf->event_msgname(event, msg->msg_name, addr_len);
	} else {
		sp->pf->skb_msgname(skb, msg->msg_name, addr_len);
	}

	/* Check if we allow SCTP_SNDRCVINFO. */
	if (sp->subscribe.sctp_data_io_event)
		sctp_ulpevent_read_sndrcvinfo(event, msg);
#if 0
	/* FIXME: we should be calling IP/IPv6 layers.  */
	if (sk->sk_protinfo.af_inet.cmsg_flags)
		ip_cmsg_recv(msg, skb);
#endif

	err = copied;

	/* If skb's length exceeds the user's buffer, update the skb and
	 * push it back to the receive_queue so that the next call to
	 * recvmsg() will return the remaining data.  Don't set MSG_EOR.
	 */
	if (skb_len > copied) {
		msg->msg_flags &= ~MSG_EOR;
		if (flags & MSG_PEEK)
			goto out_free;
		sctp_skb_pull(skb, copied);
		skb_queue_head(&sk->sk_receive_queue, skb);

		/* When only partial message is copied to the user, increase
		 * rwnd by that amount. If all the data in the skb is read,
		 * rwnd is updated when the event is freed.
		 */
		if (!sctp_ulpevent_is_notification(event))
			sctp_assoc_rwnd_increase(event->asoc, copied);
		goto out;
	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
		   (event->msg_flags & MSG_EOR))
		msg->msg_flags |= MSG_EOR;
	else
		msg->msg_flags &= ~MSG_EOR;

out_free:
	if (flags & MSG_PEEK) {
		/* Release the skb reference acquired after peeking the skb in
		 * sctp_skb_recv_datagram().
		 */
		kfree_skb(skb);
	} else {
		/* Free the event which includes releasing the reference to
		 * the owner of the skb, freeing the skb and updating the
		 * rwnd.
		 */
		sctp_ulpevent_free(event);
	}
out:
	sctp_release_sock(sk);
	return err;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag.  If enabled no SCTP message
 * fragmentation will be performed.  Instead if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * instead an error will be indicated to the user.
 */
static int sctp_setsockopt_disable_fragments(struct sock *sk,
					     char __user *optval, int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;

	return 0;
}
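/* User-space view of the option handled above, for reference (illustrative;
 * error handling omitted):
 *
 *	int on = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS, &on, sizeof(on));
 */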
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
				  int optlen)
{
	if (optlen > sizeof(struct sctp_event_subscribe))
		return -EINVAL;
	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
		return -EFAULT;
	return 0;
}
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
				     int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);

	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (optlen != sizeof(int))
		return -EINVAL;
	if (copy_from_user(&sp->autoclose, optval, optlen))
		return -EFAULT;

	return 0;
}
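/* User-space view of SCTP_AUTOCLOSE as handled above: close idle one-to-many
 * associations after an example timeout of 60 seconds (illustrative; error
 * handling omitted):
 *
 *	int secs = 60;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 */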
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 *  };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in by the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable. If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for. This value will apply to all addresses of an
 *                     association if the spp_address field is empty. Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association. The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE - Enable heartbeats on the
 *                     specified address. Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address. Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled. Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified. Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *                     heartbeat delay is to be set to the value of 0
 *                     milliseconds.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address. Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address. Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected. Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive. Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack. The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address. Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack. If the spp_address field is blank then
 *                     delayed sack is disabled for the entire association. Note
 *                     also that this field is mutually exclusive to
 *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                     results.
 */
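/* A user-space sketch of SCTP_PEER_ADDR_PARAMS as documented above: enable
 * heartbeats on every address of an association with a 5000 ms interval.
 * Illustrative only: spp_address is left as the wildcard so the setting
 * applies association-wide, and error handling is omitted.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <string.h>
 *
 *	int example_hb(int sd, sctp_assoc_t id)
 *	{
 *		struct sctp_paddrparams p;
 *
 *		memset(&p, 0, sizeof(p));
 *		p.spp_assoc_id = id;
 *		p.spp_hbinterval = 5000;
 *		p.spp_flags = SPP_HB_ENABLE;
 *
 *		return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *				  &p, sizeof(p));
 *	}
 */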
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
				       struct sctp_transport   *trans,
				       struct sctp_association *asoc,
				       struct sctp_sock        *sp,
				       int                      hb_change,
				       int                      pmtud_change,
				       int                      sackdelay_change)
{
	int error;

	if (params->spp_flags & SPP_HB_DEMAND && trans) {
		error = sctp_primitive_REQUESTHEARTBEAT (trans->asoc, trans);
		if (error)
			return error;
	}

	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
	 * this field is ignored.  Note also that a value of zero indicates
	 * the current setting should be left unchanged.
	 */
	if (params->spp_flags & SPP_HB_ENABLE) {

		/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
		 * set.  This lets us use 0 value when this flag
		 * is set.
		 */
		if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
			params->spp_hbinterval = 0;

		if (params->spp_hbinterval ||
		    (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
			if (trans) {
				trans->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else if (asoc) {
				asoc->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else {
				sp->hbinterval = params->spp_hbinterval;
			}
		}
	}

	if (hb_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_HB) | hb_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_HB) | hb_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_HB) | hb_change;
		}
	}

	/* When Path MTU discovery is disabled the value specified here will
	 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
	 * include the flag SPP_PMTUD_DISABLE for this field to have any
	 * effect).
	 */
	if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
		if (trans) {
			trans->pathmtu = params->spp_pathmtu;
			sctp_assoc_sync_pmtu(asoc);
		} else if (asoc) {
			asoc->pathmtu = params->spp_pathmtu;
			sctp_frag_point(sp, params->spp_pathmtu);
		} else {
			sp->pathmtu = params->spp_pathmtu;
		}
	}

	if (pmtud_change) {
		if (trans) {
			int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
				(params->spp_flags & SPP_PMTUD_ENABLE);
			trans->param_flags =
				(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
			if (update) {
				sctp_transport_pmtu(trans);
				sctp_assoc_sync_pmtu(asoc);
			}
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
		}
	}

	/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
	 * value of this field is ignored.  Note also that a value of zero
	 * indicates the current setting should be left unchanged.
	 */
	if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
		if (trans) {
			trans->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else if (asoc) {
			asoc->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else {
			sp->sackdelay = params->spp_sackdelay;
		}
	}

	if (sackdelay_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		}
	}

	/* Note that unless the spp_flag is set to SPP_PMTUD_ENABLE the value
	 * of this field is ignored.  Note also that a value of zero
	 * indicates the current setting should be left unchanged.
	 */
	if ((params->spp_flags & SPP_PMTUD_ENABLE) && params->spp_pathmaxrxt) {
		if (trans) {
			trans->pathmaxrxt = params->spp_pathmaxrxt;
		} else if (asoc) {
			asoc->pathmaxrxt = params->spp_pathmaxrxt;
		} else {
			sp->pathmaxrxt = params->spp_pathmaxrxt;
		}
	}

	return 0;
}
2232 static int sctp_setsockopt_peer_addr_params(struct sock
*sk
,
2233 char __user
*optval
, int optlen
)
2235 struct sctp_paddrparams params
;
2236 struct sctp_transport
*trans
= NULL
;
2237 struct sctp_association
*asoc
= NULL
;
2238 struct sctp_sock
*sp
= sctp_sk(sk
);
2240 int hb_change
, pmtud_change
, sackdelay_change
;
2242 if (optlen
!= sizeof(struct sctp_paddrparams
))
2245 if (copy_from_user(¶ms
, optval
, optlen
))
2248 /* Validate flags and value parameters. */
2249 hb_change
= params
.spp_flags
& SPP_HB
;
2250 pmtud_change
= params
.spp_flags
& SPP_PMTUD
;
2251 sackdelay_change
= params
.spp_flags
& SPP_SACKDELAY
;
2253 if (hb_change
== SPP_HB
||
2254 pmtud_change
== SPP_PMTUD
||
2255 sackdelay_change
== SPP_SACKDELAY
||
2256 params
.spp_sackdelay
> 500 ||
2258 && params
.spp_pathmtu
< SCTP_DEFAULT_MINSEGMENT
))
2261 /* If an address other than INADDR_ANY is specified, and
2262 * no transport is found, then the request is invalid.
2264 if (!sctp_is_any(( union sctp_addr
*)¶ms
.spp_address
)) {
2265 trans
= sctp_addr_id2transport(sk
, ¶ms
.spp_address
,
2266 params
.spp_assoc_id
);
2271 /* Get association, if assoc_id != 0 and the socket is a one
2272 * to many style socket, and an association was not found, then
2273 * the id was invalid.
2275 asoc
= sctp_id2assoc(sk
, params
.spp_assoc_id
);
2276 if (!asoc
&& params
.spp_assoc_id
&& sctp_style(sk
, UDP
))
2279 /* Heartbeat demand can only be sent on a transport or
2280 * association, but not a socket.
2282 if (params
.spp_flags
& SPP_HB_DEMAND
&& !trans
&& !asoc
)
2285 /* Process parameters. */
2286 error
= sctp_apply_peer_addr_params(¶ms
, trans
, asoc
, sp
,
2287 hb_change
, pmtud_change
,
2293 /* If changes are for association, also apply parameters to each
2296 if (!trans
&& asoc
) {
2297 list_for_each_entry(trans
, &asoc
->peer
.transport_addr_list
,
2299 sctp_apply_peer_addr_params(¶ms
, trans
, asoc
, sp
,
2300 hb_change
, pmtud_change
,
2308 /* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME)
2310 * This option will get or set the delayed ack timer. The time is set
2311 * in milliseconds. If the assoc_id is 0, then this sets or gets the
2312 * endpoint's default delayed ack timer value. If the assoc_id field is
2313 * non-zero, then the set or get affects the specified association.
2315 * struct sctp_assoc_value {
2316 * sctp_assoc_t assoc_id;
2317 * uint32_t assoc_value;
2320 * assoc_id    - This parameter indicates which association the
2321 *               user is performing an action upon. Note that if
2322 *               this field's value is zero then the endpoint's
2323 *               default value is changed (affecting future
2324 * associations only).
2326 * assoc_value - This parameter contains the number of milliseconds
2327 * that the user is requesting the delayed ACK timer
2328 * be set to. Note that this value is defined in
2329 * the standard to be between 200 and 500 milliseconds.
2331 * Note: a value of zero will leave the value alone,
2332 * but disable SACK delay. A non-zero value will also
2333 * enable SACK delay.
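/* Illustrative userspace sketch (not part of this file, and assuming the
 * lksctp-tools <netinet/sctp.h> definitions): an application holding an
 * SCTP socket descriptor `sd` (placeholder name) could request a 300 ms
 * delayed ACK for the endpoint default (assoc_id of 0) roughly like this:
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = 0;
 *	av.assoc_value = 300;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_ACK_TIME,
 *		       &av, sizeof(av)) < 0)
 *		perror("setsockopt(SCTP_DELAYED_ACK_TIME)");
 */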
2336 static int sctp_setsockopt_delayed_ack_time(struct sock
*sk
,
2337 char __user
*optval
, int optlen
)
2339 struct sctp_assoc_value params
;
2340 struct sctp_transport
*trans
= NULL
;
2341 struct sctp_association
*asoc
= NULL
;
2342 struct sctp_sock
*sp
= sctp_sk(sk
);
2344 if (optlen
!= sizeof(struct sctp_assoc_value
))
2347 if (copy_from_user(¶ms
, optval
, optlen
))
2350 /* Validate value parameter. */
2351 if (params
.assoc_value
> 500)
2354 /* Get association, if assoc_id != 0 and the socket is a one
2355 * to many style socket, and an association was not found, then
2356 * the id was invalid.
2358 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
2359 if (!asoc
&& params
.assoc_id
&& sctp_style(sk
, UDP
))
2362 if (params
.assoc_value
) {
2365 msecs_to_jiffies(params
.assoc_value
);
2367 (asoc
->param_flags
& ~SPP_SACKDELAY
) |
2368 SPP_SACKDELAY_ENABLE
;
2370 sp
->sackdelay
= params
.assoc_value
;
2372 (sp
->param_flags
& ~SPP_SACKDELAY
) |
2373 SPP_SACKDELAY_ENABLE
;
2378 (asoc
->param_flags
& ~SPP_SACKDELAY
) |
2379 SPP_SACKDELAY_DISABLE
;
2382 (sp
->param_flags
& ~SPP_SACKDELAY
) |
2383 SPP_SACKDELAY_DISABLE
;
2387 /* If change is for association, also apply to each transport. */
2389 list_for_each_entry(trans
, &asoc
->peer
.transport_addr_list
,
2391 if (params
.assoc_value
) {
2393 msecs_to_jiffies(params
.assoc_value
);
2394 trans
->param_flags
=
2395 (trans
->param_flags
& ~SPP_SACKDELAY
) |
2396 SPP_SACKDELAY_ENABLE
;
2398 trans
->param_flags
=
2399 (trans
->param_flags
& ~SPP_SACKDELAY
) |
2400 SPP_SACKDELAY_DISABLE
;
2408 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
2410 * Applications can specify protocol parameters for the default association
2411 * initialization. The option name argument to setsockopt() and getsockopt()
2414 * Setting initialization parameters is effective only on an unconnected
2415 * socket (for UDP-style sockets only future associations are affected
2416 * by the change). With TCP-style sockets, this option is inherited by
2417 * sockets derived from a listener socket.
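/* Illustrative userspace sketch (not part of this file): with the
 * lksctp-tools <netinet/sctp.h> header and a not-yet-connected SCTP
 * socket `sd` (placeholder), the default INIT parameters might be tuned
 * as follows; as the handler below shows, zero fields are left unchanged:
 *
 *	struct sctp_initmsg init;
 *
 *	memset(&init, 0, sizeof(init));
 *	init.sinit_num_ostreams = 10;
 *	init.sinit_max_instreams = 10;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG,
 *		       &init, sizeof(init)) < 0)
 *		perror("setsockopt(SCTP_INITMSG)");
 */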
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int optlen)
{
	struct sctp_initmsg sinit;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof(struct sctp_initmsg))
		return -EINVAL;
	if (copy_from_user(&sinit, optval, optlen))
		return -EFAULT;

	if (sinit.sinit_num_ostreams)
		sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
	if (sinit.sinit_max_instreams)
		sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
	if (sinit.sinit_max_attempts)
		sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
	if (sinit.sinit_max_init_timeo)
		sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;

	return 0;
}
2442 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
2444 * Applications that wish to use the sendto() system call may wish to
2445 * specify a default set of parameters that would normally be supplied
2446 * through the inclusion of ancillary data. This socket option allows
2447 * such an application to set the default sctp_sndrcvinfo structure.
2448 * The application that wishes to use this socket option simply passes
2449 * in to this call the sctp_sndrcvinfo structure defined in Section
2450 * 5.2.2) The input parameters accepted by this call include
2451 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
2452 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
2453 * to this call if the caller is using the UDP model.
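/* Illustrative userspace sketch (not part of this file): a one-to-many
 * style application with socket `sd` and association id `assoc`
 * (placeholder names) might set per-association send defaults like so:
 *
 *	struct sctp_sndrcvinfo info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.sinfo_assoc_id = assoc;
 *	info.sinfo_stream = 1;
 *	info.sinfo_timetolive = 5000;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		       &info, sizeof(info)) < 0)
 *		perror("setsockopt(SCTP_DEFAULT_SEND_PARAM)");
 */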
static int sctp_setsockopt_default_send_param(struct sock *sk,
					      char __user *optval, int optlen)
{
	struct sctp_sndrcvinfo info;
	struct sctp_association *asoc;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof(struct sctp_sndrcvinfo))
		return -EINVAL;
	if (copy_from_user(&info, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		asoc->default_stream = info.sinfo_stream;
		asoc->default_flags = info.sinfo_flags;
		asoc->default_ppid = info.sinfo_ppid;
		asoc->default_context = info.sinfo_context;
		asoc->default_timetolive = info.sinfo_timetolive;
	} else {
		sp->default_stream = info.sinfo_stream;
		sp->default_flags = info.sinfo_flags;
		sp->default_ppid = info.sinfo_ppid;
		sp->default_context = info.sinfo_context;
		sp->default_timetolive = info.sinfo_timetolive;
	}

	return 0;
}
2488 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
2490 * Requests that the local SCTP stack use the enclosed peer address as
2491 * the association primary. The enclosed address must be one of the
2492 * association peer's addresses.
static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
					int optlen)
{
	struct sctp_prim prim;
	struct sctp_transport *trans;

	if (optlen != sizeof(struct sctp_prim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
		return -EFAULT;

	trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
	if (!trans)
		return -EINVAL;

	sctp_assoc_set_primary(trans->asoc, trans);

	return 0;
}
2516 * 7.1.5 SCTP_NODELAY
2518 * Turn on/off any Nagle-like algorithm. This means that packets are
2519 * generally sent as soon as possible and no unnecessary delays are
2520 * introduced, at the cost of more packets in the network. Expects an
2521 * integer boolean flag.
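/* Illustrative userspace sketch (not part of this file): disabling the
 * Nagle-like delay on an SCTP socket `sd` (placeholder name) takes just
 * an integer flag:
 *
 *	int on = 1;
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
 *		perror("setsockopt(SCTP_NODELAY)");
 */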
static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
				   int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
	return 0;
}
2539 * 7.1.1 SCTP_RTOINFO
2541 * The protocol parameters used to initialize and bound retransmission
2542 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
2543 * and modify these parameters.
2544 * All parameters are time values, in milliseconds. A value of 0, when
2545 * modifying the parameters, indicates that the current value should not
2546 * be changed.
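/* Illustrative userspace sketch (not part of this file): bounding the RTO
 * for the endpoint defaults on socket `sd` (placeholder name); values are
 * in milliseconds and zero fields are left unchanged:
 *
 *	struct sctp_rtoinfo rto;
 *
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_assoc_id = 0;
 *	rto.srto_min = 100;
 *	rto.srto_max = 2000;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto)) < 0)
 *		perror("setsockopt(SCTP_RTOINFO)");
 */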
static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int optlen) {
	struct sctp_rtoinfo rtoinfo;
	struct sctp_association *asoc;

	if (optlen != sizeof (struct sctp_rtoinfo))
		return -EINVAL;

	if (copy_from_user(&rtoinfo, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);

	/* Set the values to the specific association */
	if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		if (rtoinfo.srto_initial != 0)
			asoc->rto_initial =
				msecs_to_jiffies(rtoinfo.srto_initial);
		if (rtoinfo.srto_max != 0)
			asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max);
		if (rtoinfo.srto_min != 0)
			asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min);
	} else {
		/* If there is no association or the association-id = 0
		 * set the values to the endpoint.
		 */
		struct sctp_sock *sp = sctp_sk(sk);

		if (rtoinfo.srto_initial != 0)
			sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
		if (rtoinfo.srto_max != 0)
			sp->rtoinfo.srto_max = rtoinfo.srto_max;
		if (rtoinfo.srto_min != 0)
			sp->rtoinfo.srto_min = rtoinfo.srto_min;
	}

	return 0;
}
2592 * 7.1.2 SCTP_ASSOCINFO
2594 * This option is used to tune the maximum retransmission attempts
2595 * of the association.
2596 * Returns an error if the new association retransmission value is
2597 * greater than the sum of the retransmission value of the peer.
2598 * See [SCTP] for more information.
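/* Illustrative userspace sketch (not part of this file): raising the
 * association maximum retransmission count and the cookie lifetime (in
 * milliseconds) for the endpoint defaults on socket `sd` (placeholder):
 *
 *	struct sctp_assocparams ap;
 *
 *	memset(&ap, 0, sizeof(ap));
 *	ap.sasoc_assoc_id = 0;
 *	ap.sasoc_asocmaxrxt = 10;
 *	ap.sasoc_cookie_life = 60000;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap)) < 0)
 *		perror("setsockopt(SCTP_ASSOCINFO)");
 */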
2601 static int sctp_setsockopt_associnfo(struct sock
*sk
, char __user
*optval
, int optlen
)
2604 struct sctp_assocparams assocparams
;
2605 struct sctp_association
*asoc
;
2607 if (optlen
!= sizeof(struct sctp_assocparams
))
2609 if (copy_from_user(&assocparams
, optval
, optlen
))
2612 asoc
= sctp_id2assoc(sk
, assocparams
.sasoc_assoc_id
);
2614 if (!asoc
&& assocparams
.sasoc_assoc_id
&& sctp_style(sk
, UDP
))
2617 /* Set the values to the specific association */
2619 if (assocparams
.sasoc_asocmaxrxt
!= 0) {
2622 struct sctp_transport
*peer_addr
;
2624 list_for_each_entry(peer_addr
, &asoc
->peer
.transport_addr_list
,
2626 path_sum
+= peer_addr
->pathmaxrxt
;
2630 			/* Only validate asocmaxrxt if we have more than
2631 * one path/transport. We do this because path
2632 * retransmissions are only counted when we have more
2636 assocparams
.sasoc_asocmaxrxt
> path_sum
)
2639 asoc
->max_retrans
= assocparams
.sasoc_asocmaxrxt
;
2642 if (assocparams
.sasoc_cookie_life
!= 0) {
2643 asoc
->cookie_life
.tv_sec
=
2644 assocparams
.sasoc_cookie_life
/ 1000;
2645 asoc
->cookie_life
.tv_usec
=
2646 (assocparams
.sasoc_cookie_life
% 1000)
2650 /* Set the values to the endpoint */
2651 struct sctp_sock
*sp
= sctp_sk(sk
);
2653 if (assocparams
.sasoc_asocmaxrxt
!= 0)
2654 sp
->assocparams
.sasoc_asocmaxrxt
=
2655 assocparams
.sasoc_asocmaxrxt
;
2656 if (assocparams
.sasoc_cookie_life
!= 0)
2657 sp
->assocparams
.sasoc_cookie_life
=
2658 assocparams
.sasoc_cookie_life
;
2664 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
2666 * This socket option is a boolean flag which turns on or off mapped V4
2667 * addresses. If this option is turned on and the socket is type
2668 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
2669 * If this option is turned off, then no mapping will be done of V4
2670 * addresses and a user will receive both PF_INET6 and PF_INET type
2671 * addresses on the socket.
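/* Illustrative userspace sketch (not part of this file): turning mapped
 * addresses off on a PF_INET6 SCTP socket `sd6` (placeholder name), so
 * that IPv4 peers are reported with AF_INET addresses:
 *
 *	int off = 0;
 *
 *	if (setsockopt(sd6, IPPROTO_SCTP, SCTP_I_WANT_MAPPED_V4_ADDR,
 *		       &off, sizeof(off)) < 0)
 *		perror("setsockopt(SCTP_I_WANT_MAPPED_V4_ADDR)");
 */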
static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, int optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;
	if (val)
		sp->v4mapped = 1;
	else
		sp->v4mapped = 0;

	return 0;
}
2691 * 7.1.17 Set the maximum fragmentation size (SCTP_MAXSEG)
2693 * This socket option specifies the maximum size to put in any outgoing
2694 * SCTP chunk. If a message is larger than this size it will be
2695 * fragmented by SCTP into the specified size. Note that the underlying
2696 * SCTP implementation may fragment into smaller sized chunks when the
2697 * PMTU of the underlying association is smaller than the value set by
2698 * the user.
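/* Illustrative userspace sketch (not part of this file): capping outgoing
 * chunks at 1400 bytes on socket `sd` (placeholder name); per the handler
 * below, non-zero values must lie between 8 and SCTP_MAX_CHUNK_LEN:
 *
 *	int maxseg = 1400;
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG,
 *		       &maxseg, sizeof(maxseg)) < 0)
 *		perror("setsockopt(SCTP_MAXSEG)");
 */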
static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optlen)
{
	struct sctp_association *asoc;
	struct sctp_sock *sp = sctp_sk(sk);
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;
	if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
		return -EINVAL;
	sp->user_frag = val;

	/* Update the frag_point of the existing associations. */
	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
	}

	return 0;
}
2724 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
2726 * Requests that the peer mark the enclosed address as the association
2727 * primary. The enclosed address must be one of the association's
2728 * locally bound addresses. The following structure is used to make a
2729 * set primary request:
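/* Illustrative userspace sketch (not part of this file): asking the peer
 * of association `assoc` to use one of our locally bound addresses (here
 * copied from a prepared struct sockaddr_in `laddr`) as its primary;
 * `sd`, `assoc` and `laddr` are placeholder names, and the request only
 * succeeds when ADD-IP/ASCONF support is enabled on both ends:
 *
 *	struct sctp_setpeerprim spp;
 *
 *	memset(&spp, 0, sizeof(spp));
 *	spp.sspp_assoc_id = assoc;
 *	memcpy(&spp.sspp_addr, &laddr, sizeof(laddr));
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_SET_PEER_PRIMARY_ADDR,
 *		       &spp, sizeof(spp)) < 0)
 *		perror("setsockopt(SCTP_SET_PEER_PRIMARY_ADDR)");
 */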
2731 static int sctp_setsockopt_peer_primary_addr(struct sock
*sk
, char __user
*optval
,
2734 struct sctp_sock
*sp
;
2735 struct sctp_endpoint
*ep
;
2736 struct sctp_association
*asoc
= NULL
;
2737 struct sctp_setpeerprim prim
;
2738 struct sctp_chunk
*chunk
;
2744 if (!sctp_addip_enable
)
2747 if (optlen
!= sizeof(struct sctp_setpeerprim
))
2750 if (copy_from_user(&prim
, optval
, optlen
))
2753 asoc
= sctp_id2assoc(sk
, prim
.sspp_assoc_id
);
2757 if (!asoc
->peer
.asconf_capable
)
2760 if (asoc
->peer
.addip_disabled_mask
& SCTP_PARAM_SET_PRIMARY
)
2763 if (!sctp_state(asoc
, ESTABLISHED
))
2766 if (!sctp_assoc_lookup_laddr(asoc
, (union sctp_addr
*)&prim
.sspp_addr
))
2767 return -EADDRNOTAVAIL
;
2769 /* Create an ASCONF chunk with SET_PRIMARY parameter */
2770 chunk
= sctp_make_asconf_set_prim(asoc
,
2771 (union sctp_addr
*)&prim
.sspp_addr
);
2775 err
= sctp_send_asconf(asoc
, chunk
);
2777 SCTP_DEBUG_PRINTK("We set peer primary addr primitively.\n");
static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
					    int optlen)
{
	struct sctp_setadaptation adaptation;

	if (optlen != sizeof(struct sctp_setadaptation))
		return -EINVAL;
	if (copy_from_user(&adaptation, optval, optlen))
		return -EFAULT;

	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;

	return 0;
}
2798 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
2800 * The context field in the sctp_sndrcvinfo structure is normally only
2801 * used when a failed message is retrieved holding the value that was
2802 * sent down on the actual send call. This option allows the setting of
2803 * a default context on an association basis that will be received on
2804 * reading messages from the peer. This is especially helpful in the
2805 * one-2-many model for an application to keep some reference to an
2806 * internal state machine that is processing messages on the
2807 * association. Note that the setting of this value only affects
2808 * received messages from the peer and does not affect the value that is
2809 * saved with outbound messages.
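/* Illustrative userspace sketch (not part of this file): attaching an
 * application cookie to messages received on association `assoc` of the
 * one-to-many socket `sd` (placeholder names):
 *
 *	struct sctp_assoc_value ctx;
 *
 *	memset(&ctx, 0, sizeof(ctx));
 *	ctx.assoc_id = assoc;
 *	ctx.assoc_value = 0x1234;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_CONTEXT, &ctx, sizeof(ctx)) < 0)
 *		perror("setsockopt(SCTP_CONTEXT)");
 */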
static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
				   int optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (optlen != sizeof(struct sctp_assoc_value))
		return -EINVAL;
	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		asoc->default_rcv_context = params.assoc_value;
	} else {
		sp->default_rcv_context = params.assoc_value;
	}

	return 0;
}
2838 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
2840 * This options will at a minimum specify if the implementation is doing
2841 * fragmented interleave. Fragmented interleave, for a one to many
2842 * socket, is when subsequent calls to receive a message may return
2843 * parts of messages from different associations. Some implementations
2844 * may allow you to turn this value on or off. If so, when turned off,
2845 * no fragment interleave will occur (which will cause a head of line
2846 * blocking amongst multiple associations sharing the same one to many
2847 * socket). When this option is turned on, then each receive call may
2848 * come from a different association (thus the user must receive data
2849 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
2850 * association each receive belongs to.
2852 * This option takes a boolean value. A non-zero value indicates that
2853 * fragmented interleave is on. A value of zero indicates that
2854 * fragmented interleave is off.
2856 * Note that it is important that an implementation that allows this
2857 * option to be turned on, have it off by default. Otherwise an unaware
2858 * application using the one to many model may become confused and act
static int sctp_setsockopt_fragment_interleave(struct sock *sk,
					       char __user *optval,
					       int optlen)
{
	int val;

	if (optlen != sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;

	return 0;
}
2878 * 7.1.25. Set or Get the sctp partial delivery point
2879 * (SCTP_PARTIAL_DELIVERY_POINT)
2880 * This option will set or get the SCTP partial delivery point. This
2881 * point is the size of a message where the partial delivery API will be
2882 * invoked to help free up rwnd space for the peer. Setting this to a
2883 * lower value will cause partial deliveries to happen more often. The
2884 * calls argument is an integer that sets or gets the partial delivery
static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
						  char __user *optval,
						  int optlen)
{
	u32 val;

	if (optlen != sizeof(u32))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->pd_point = val;

	return 0; /* is this the right error code? */
}
2904 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
2906 * This option will allow a user to change the maximum burst of packets
2907 * that can be emitted by this association. Note that the default value
2908 * is 4, and some implementations may restrict this setting so that it
2909 * can only be lowered.
2911 * NOTE: This text doesn't seem right. Do this on a socket basis with
2912 * future associations inheriting the socket value.
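/* Illustrative userspace sketch (not part of this file): raising the
 * maximum burst on socket `sd` (placeholder name). The handler below also
 * accepts a bare int, but warns that form is deprecated, so the struct
 * form is preferred:
 *
 *	struct sctp_assoc_value mb;
 *
 *	memset(&mb, 0, sizeof(mb));
 *	mb.assoc_id = 0;
 *	mb.assoc_value = 8;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_MAX_BURST, &mb, sizeof(mb)) < 0)
 *		perror("setsockopt(SCTP_MAX_BURST)");
 */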
2914 static int sctp_setsockopt_maxburst(struct sock
*sk
,
2915 char __user
*optval
,
2918 struct sctp_assoc_value params
;
2919 struct sctp_sock
*sp
;
2920 struct sctp_association
*asoc
;
2924 if (optlen
< sizeof(int))
2927 if (optlen
== sizeof(int)) {
2929 "SCTP: Use of int in max_burst socket option deprecated\n");
2931 "SCTP: Use struct sctp_assoc_value instead\n");
2932 if (copy_from_user(&val
, optval
, optlen
))
2934 } else if (optlen
== sizeof(struct sctp_assoc_value
)) {
2935 if (copy_from_user(¶ms
, optval
, optlen
))
2937 val
= params
.assoc_value
;
2938 assoc_id
= params
.assoc_id
;
2944 if (assoc_id
!= 0) {
2945 asoc
= sctp_id2assoc(sk
, assoc_id
);
2948 asoc
->max_burst
= val
;
2950 sp
->max_burst
= val
;
2956 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
2958 * This set option adds a chunk type that the user is requesting to be
2959 * received only in an authenticated way. Changes to the list of chunks
2960 * will only affect future associations on the socket.
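/* Illustrative userspace sketch (not part of this file): requesting that
 * DATA chunks (chunk type 0 in the SCTP chunk registry) be accepted only
 * when authenticated, on socket `sd` (placeholder name); this requires
 * the kernel's SCTP AUTH support to be enabled:
 *
 *	struct sctp_authchunk ac;
 *
 *	ac.sauth_chunk = 0;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac)) < 0)
 *		perror("setsockopt(SCTP_AUTH_CHUNK)");
 */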
static int sctp_setsockopt_auth_chunk(struct sock *sk,
				      char __user *optval,
				      int optlen)
{
	struct sctp_authchunk val;

	if (!sctp_auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authchunk))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	switch (val.sauth_chunk) {
	case SCTP_CID_INIT:
	case SCTP_CID_INIT_ACK:
	case SCTP_CID_SHUTDOWN_COMPLETE:
	case SCTP_CID_AUTH:
		return -EINVAL;
	}

	/* add this chunk id to the endpoint */
	return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk);
}
2989 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
2991 * This option gets or sets the list of HMAC algorithms that the local
2992 * endpoint requires the peer to use.
2994 static int sctp_setsockopt_hmac_ident(struct sock
*sk
,
2995 char __user
*optval
,
2998 struct sctp_hmacalgo
*hmacs
;
3002 if (!sctp_auth_enable
)
3005 if (optlen
< sizeof(struct sctp_hmacalgo
))
3008 hmacs
= kmalloc(optlen
, GFP_KERNEL
);
3012 if (copy_from_user(hmacs
, optval
, optlen
)) {
3017 idents
= hmacs
->shmac_num_idents
;
3018 if (idents
== 0 || idents
> SCTP_AUTH_NUM_HMACS
||
3019 (idents
* sizeof(u16
)) > (optlen
- sizeof(struct sctp_hmacalgo
))) {
3024 err
= sctp_auth_ep_set_hmacs(sctp_sk(sk
)->ep
, hmacs
);
3031 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
3033 * This option will set a shared secret key which is used to build an
3034 * association shared key.
3036 static int sctp_setsockopt_auth_key(struct sock
*sk
,
3037 char __user
*optval
,
3040 struct sctp_authkey
*authkey
;
3041 struct sctp_association
*asoc
;
3044 if (!sctp_auth_enable
)
3047 if (optlen
<= sizeof(struct sctp_authkey
))
3050 authkey
= kmalloc(optlen
, GFP_KERNEL
);
3054 if (copy_from_user(authkey
, optval
, optlen
)) {
3059 if (authkey
->sca_keylength
> optlen
- sizeof(struct sctp_authkey
)) {
3064 asoc
= sctp_id2assoc(sk
, authkey
->sca_assoc_id
);
3065 if (!asoc
&& authkey
->sca_assoc_id
&& sctp_style(sk
, UDP
)) {
3070 ret
= sctp_auth_set_key(sctp_sk(sk
)->ep
, asoc
, authkey
);
3077 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
3079 * This option will get or set the active shared key to be used to build
3080 * the association shared key.
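/* Illustrative userspace sketch (not part of this file): switching the
 * active shared key of association `assoc` on socket `sd` (placeholder
 * names) to key number 1, assuming that key was installed earlier with
 * SCTP_AUTH_KEY:
 *
 *	struct sctp_authkeyid keyid;
 *
 *	memset(&keyid, 0, sizeof(keyid));
 *	keyid.scact_assoc_id = assoc;
 *	keyid.scact_keynumber = 1;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
 *		       &keyid, sizeof(keyid)) < 0)
 *		perror("setsockopt(SCTP_AUTH_ACTIVE_KEY)");
 */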
static int sctp_setsockopt_active_key(struct sock *sk,
				      char __user *optval,
				      int optlen)
{
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!sctp_auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc,
					val.scact_keynumber);
}
3106 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
3108 * This set option will delete a shared secret key from use.
static int sctp_setsockopt_del_key(struct sock *sk,
				   char __user *optval,
				   int optlen)
{
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!sctp_auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc,
				    val.scact_keynumber);
}
3135 /* API 6.2 setsockopt(), getsockopt()
3137 * Applications use setsockopt() and getsockopt() to set or retrieve
3138 * socket options. Socket options are used to change the default
3139 * behavior of sockets calls. They are described in Section 7.
3143 * ret = getsockopt(int sd, int level, int optname, void __user *optval,
3144 * int __user *optlen);
3145 * ret = setsockopt(int sd, int level, int optname, const void __user *optval,
3148 * sd      - the socket descriptor.
3149 * level - set to IPPROTO_SCTP for all SCTP options.
3150 * optname - the option name.
3151 * optval - the buffer to store the value of the option.
3152 * optlen - the size of the buffer.
3154 SCTP_STATIC
int sctp_setsockopt(struct sock
*sk
, int level
, int optname
,
3155 char __user
*optval
, int optlen
)
3159 SCTP_DEBUG_PRINTK("sctp_setsockopt(sk: %p... optname: %d)\n",
3162 /* I can hardly begin to describe how wrong this is. This is
3163 * so broken as to be worse than useless. The API draft
3164 * REALLY is NOT helpful here... I am not convinced that the
3165 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
3166 * are at all well-founded.
3168 if (level
!= SOL_SCTP
) {
3169 struct sctp_af
*af
= sctp_sk(sk
)->pf
->af
;
3170 retval
= af
->setsockopt(sk
, level
, optname
, optval
, optlen
);
3177 case SCTP_SOCKOPT_BINDX_ADD
:
3178 /* 'optlen' is the size of the addresses buffer. */
3179 retval
= sctp_setsockopt_bindx(sk
, (struct sockaddr __user
*)optval
,
3180 optlen
, SCTP_BINDX_ADD_ADDR
);
3183 case SCTP_SOCKOPT_BINDX_REM
:
3184 /* 'optlen' is the size of the addresses buffer. */
3185 retval
= sctp_setsockopt_bindx(sk
, (struct sockaddr __user
*)optval
,
3186 optlen
, SCTP_BINDX_REM_ADDR
);
3189 case SCTP_SOCKOPT_CONNECTX
:
3190 /* 'optlen' is the size of the addresses buffer. */
3191 retval
= sctp_setsockopt_connectx(sk
, (struct sockaddr __user
*)optval
,
3195 case SCTP_DISABLE_FRAGMENTS
:
3196 retval
= sctp_setsockopt_disable_fragments(sk
, optval
, optlen
);
3200 retval
= sctp_setsockopt_events(sk
, optval
, optlen
);
3203 case SCTP_AUTOCLOSE
:
3204 retval
= sctp_setsockopt_autoclose(sk
, optval
, optlen
);
3207 case SCTP_PEER_ADDR_PARAMS
:
3208 retval
= sctp_setsockopt_peer_addr_params(sk
, optval
, optlen
);
3211 case SCTP_DELAYED_ACK_TIME
:
3212 retval
= sctp_setsockopt_delayed_ack_time(sk
, optval
, optlen
);
3214 case SCTP_PARTIAL_DELIVERY_POINT
:
3215 retval
= sctp_setsockopt_partial_delivery_point(sk
, optval
, optlen
);
3219 retval
= sctp_setsockopt_initmsg(sk
, optval
, optlen
);
3221 case SCTP_DEFAULT_SEND_PARAM
:
3222 retval
= sctp_setsockopt_default_send_param(sk
, optval
,
3225 case SCTP_PRIMARY_ADDR
:
3226 retval
= sctp_setsockopt_primary_addr(sk
, optval
, optlen
);
3228 case SCTP_SET_PEER_PRIMARY_ADDR
:
3229 retval
= sctp_setsockopt_peer_primary_addr(sk
, optval
, optlen
);
3232 retval
= sctp_setsockopt_nodelay(sk
, optval
, optlen
);
3235 retval
= sctp_setsockopt_rtoinfo(sk
, optval
, optlen
);
3237 case SCTP_ASSOCINFO
:
3238 retval
= sctp_setsockopt_associnfo(sk
, optval
, optlen
);
3240 case SCTP_I_WANT_MAPPED_V4_ADDR
:
3241 retval
= sctp_setsockopt_mappedv4(sk
, optval
, optlen
);
3244 retval
= sctp_setsockopt_maxseg(sk
, optval
, optlen
);
3246 case SCTP_ADAPTATION_LAYER
:
3247 retval
= sctp_setsockopt_adaptation_layer(sk
, optval
, optlen
);
3250 retval
= sctp_setsockopt_context(sk
, optval
, optlen
);
3252 case SCTP_FRAGMENT_INTERLEAVE
:
3253 retval
= sctp_setsockopt_fragment_interleave(sk
, optval
, optlen
);
3255 case SCTP_MAX_BURST
:
3256 retval
= sctp_setsockopt_maxburst(sk
, optval
, optlen
);
3258 case SCTP_AUTH_CHUNK
:
3259 retval
= sctp_setsockopt_auth_chunk(sk
, optval
, optlen
);
3261 case SCTP_HMAC_IDENT
:
3262 retval
= sctp_setsockopt_hmac_ident(sk
, optval
, optlen
);
3265 retval
= sctp_setsockopt_auth_key(sk
, optval
, optlen
);
3267 case SCTP_AUTH_ACTIVE_KEY
:
3268 retval
= sctp_setsockopt_active_key(sk
, optval
, optlen
);
3270 case SCTP_AUTH_DELETE_KEY
:
3271 retval
= sctp_setsockopt_del_key(sk
, optval
, optlen
);
3274 retval
= -ENOPROTOOPT
;
3278 sctp_release_sock(sk
);
3284 /* API 3.1.6 connect() - UDP Style Syntax
3286 * An application may use the connect() call in the UDP model to initiate an
3287 * association without sending data.
3291 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
3293 * sd: the socket descriptor to have a new association added to.
3295 * nam: the address structure (either struct sockaddr_in or struct
3296 * sockaddr_in6 defined in RFC2553 [7]).
3298 * len: the size of the address.
3300 SCTP_STATIC
int sctp_connect(struct sock
*sk
, struct sockaddr
*addr
,
3308 SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d\n",
3309 __func__
, sk
, addr
, addr_len
);
3311 /* Validate addr_len before calling common connect/connectx routine. */
3312 af
= sctp_get_af_specific(addr
->sa_family
);
3313 if (!af
|| addr_len
< af
->sockaddr_len
) {
3316 /* Pass correct addr len to common routine (so it knows there
3317 * is only one address being passed.
3319 err
= __sctp_connect(sk
, addr
, af
->sockaddr_len
);
3322 sctp_release_sock(sk
);
3326 /* FIXME: Write comments. */
SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags)
{
	return -EOPNOTSUPP; /* STUB */
}
3332 /* 4.1.4 accept() - TCP Style Syntax
3334 * Applications use accept() call to remove an established SCTP
3335 * association from the accept queue of the endpoint. A new socket
3336 * descriptor will be returned from accept() to represent the newly
3337 * formed association.
3339 SCTP_STATIC
struct sock
*sctp_accept(struct sock
*sk
, int flags
, int *err
)
3341 struct sctp_sock
*sp
;
3342 struct sctp_endpoint
*ep
;
3343 struct sock
*newsk
= NULL
;
3344 struct sctp_association
*asoc
;
3353 if (!sctp_style(sk
, TCP
)) {
3354 error
= -EOPNOTSUPP
;
3358 if (!sctp_sstate(sk
, LISTENING
)) {
3363 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
3365 error
= sctp_wait_for_accept(sk
, timeo
);
3369 /* We treat the list of associations on the endpoint as the accept
3370 * queue and pick the first association on the list.
3372 asoc
= list_entry(ep
->asocs
.next
, struct sctp_association
, asocs
);
3374 newsk
= sp
->pf
->create_accept_sk(sk
, asoc
);
3380 /* Populate the fields of the newsk from the oldsk and migrate the
3381 * asoc to the newsk.
3383 sctp_sock_migrate(sk
, newsk
, asoc
, SCTP_SOCKET_TCP
);
3386 sctp_release_sock(sk
);
3391 /* The SCTP ioctl handler. */
SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;
}
3397 /* This is the function which gets called during socket creation to
3398 * initialized the SCTP-specific portion of the sock.
3399 * The sock structure should already be zero-filled memory.
3401 SCTP_STATIC
int sctp_init_sock(struct sock
*sk
)
3403 struct sctp_endpoint
*ep
;
3404 struct sctp_sock
*sp
;
3406 SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk
);
3410 /* Initialize the SCTP per socket area. */
3411 switch (sk
->sk_type
) {
3412 case SOCK_SEQPACKET
:
3413 sp
->type
= SCTP_SOCKET_UDP
;
3416 sp
->type
= SCTP_SOCKET_TCP
;
3419 return -ESOCKTNOSUPPORT
;
3422 /* Initialize default send parameters. These parameters can be
3423 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
3425 sp
->default_stream
= 0;
3426 sp
->default_ppid
= 0;
3427 sp
->default_flags
= 0;
3428 sp
->default_context
= 0;
3429 sp
->default_timetolive
= 0;
3431 sp
->default_rcv_context
= 0;
3432 sp
->max_burst
= sctp_max_burst
;
3434 /* Initialize default setup parameters. These parameters
3435 * can be modified with the SCTP_INITMSG socket option or
3436 * overridden by the SCTP_INIT CMSG.
3438 sp
->initmsg
.sinit_num_ostreams
= sctp_max_outstreams
;
3439 sp
->initmsg
.sinit_max_instreams
= sctp_max_instreams
;
3440 sp
->initmsg
.sinit_max_attempts
= sctp_max_retrans_init
;
3441 sp
->initmsg
.sinit_max_init_timeo
= sctp_rto_max
;
3443 /* Initialize default RTO related parameters. These parameters can
3444 * be modified for with the SCTP_RTOINFO socket option.
3446 sp
->rtoinfo
.srto_initial
= sctp_rto_initial
;
3447 sp
->rtoinfo
.srto_max
= sctp_rto_max
;
3448 sp
->rtoinfo
.srto_min
= sctp_rto_min
;
3450 /* Initialize default association related parameters. These parameters
3451 * can be modified with the SCTP_ASSOCINFO socket option.
3453 sp
->assocparams
.sasoc_asocmaxrxt
= sctp_max_retrans_association
;
3454 sp
->assocparams
.sasoc_number_peer_destinations
= 0;
3455 sp
->assocparams
.sasoc_peer_rwnd
= 0;
3456 sp
->assocparams
.sasoc_local_rwnd
= 0;
3457 sp
->assocparams
.sasoc_cookie_life
= sctp_valid_cookie_life
;
3459 /* Initialize default event subscriptions. By default, all the
3462 memset(&sp
->subscribe
, 0, sizeof(struct sctp_event_subscribe
));
3464 /* Default Peer Address Parameters. These defaults can
3465 * be modified via SCTP_PEER_ADDR_PARAMS
3467 sp
->hbinterval
= sctp_hb_interval
;
3468 sp
->pathmaxrxt
= sctp_max_retrans_path
;
3469 sp
->pathmtu
= 0; // allow default discovery
3470 sp
->sackdelay
= sctp_sack_timeout
;
3471 sp
->param_flags
= SPP_HB_ENABLE
|
3473 SPP_SACKDELAY_ENABLE
;
3475 /* If enabled no SCTP message fragmentation will be performed.
3476 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
3478 sp
->disable_fragments
= 0;
3480 /* Enable Nagle algorithm by default. */
3483 /* Enable by default. */
3486 /* Auto-close idle associations after the configured
3487 * number of seconds. A value of 0 disables this
3488 * feature. Configure through the SCTP_AUTOCLOSE socket option,
3489 * for UDP-style sockets only.
3493 /* User specified fragmentation limit. */
3496 sp
->adaptation_ind
= 0;
3498 sp
->pf
= sctp_get_pf_specific(sk
->sk_family
);
3500 /* Control variables for partial data delivery. */
3501 atomic_set(&sp
->pd_mode
, 0);
3502 skb_queue_head_init(&sp
->pd_lobby
);
3503 sp
->frag_interleave
= 0;
3505 /* Create a per socket endpoint structure. Even if we
3506 * change the data structure relationships, this may still
3507 * be useful for storing pre-connect address information.
3509 ep
= sctp_endpoint_new(sk
, GFP_KERNEL
);
3516 SCTP_DBG_OBJCNT_INC(sock
);
3517 atomic_inc(&sctp_sockets_allocated
);
3521 /* Cleanup any SCTP per socket resources. */
SCTP_STATIC int sctp_destroy_sock(struct sock *sk)
{
	struct sctp_endpoint *ep;

	SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);

	/* Release our hold on the endpoint. */
	ep = sctp_sk(sk)->ep;
	sctp_endpoint_free(ep);
	atomic_dec(&sctp_sockets_allocated);

	return 0;
}
3535 /* API 4.1.7 shutdown() - TCP Style Syntax
3536 * int shutdown(int socket, int how);
3538 * sd - the socket descriptor of the association to be closed.
3539 * how - Specifies the type of shutdown. The values are
3542 * Disables further receive operations. No SCTP
3543 * protocol action is taken.
3545 * Disables further send operations, and initiates
3546 * the SCTP shutdown sequence.
3548 * Disables further send and receive operations
3549 * and initiates the SCTP shutdown sequence.
SCTP_STATIC void sctp_shutdown(struct sock *sk, int how)
{
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;

	if (!sctp_style(sk, TCP))
		return;

	if (how & SEND_SHUTDOWN) {
		ep = sctp_sk(sk)->ep;
		if (!list_empty(&ep->asocs)) {
			asoc = list_entry(ep->asocs.next,
					  struct sctp_association, asocs);
			sctp_primitive_SHUTDOWN(asoc, NULL);
		}
	}
}
3569 /* 7.2.1 Association Status (SCTP_STATUS)
3571 * Applications can retrieve current status information about an
3572 * association, including association state, peer receiver window size,
3573 * number of unacked data chunks, and number of data chunks pending
3574 * receipt. This information is read-only.
3576 static int sctp_getsockopt_sctp_status(struct sock
*sk
, int len
,
3577 char __user
*optval
,
3580 struct sctp_status status
;
3581 struct sctp_association
*asoc
= NULL
;
3582 struct sctp_transport
*transport
;
3583 sctp_assoc_t associd
;
3586 if (len
< sizeof(status
)) {
3591 len
= sizeof(status
);
3592 if (copy_from_user(&status
, optval
, len
)) {
3597 associd
= status
.sstat_assoc_id
;
3598 asoc
= sctp_id2assoc(sk
, associd
);
3604 transport
= asoc
->peer
.primary_path
;
3606 status
.sstat_assoc_id
= sctp_assoc2id(asoc
);
3607 status
.sstat_state
= asoc
->state
;
3608 status
.sstat_rwnd
= asoc
->peer
.rwnd
;
3609 status
.sstat_unackdata
= asoc
->unack_data
;
3611 status
.sstat_penddata
= sctp_tsnmap_pending(&asoc
->peer
.tsn_map
);
3612 status
.sstat_instrms
= asoc
->c
.sinit_max_instreams
;
3613 status
.sstat_outstrms
= asoc
->c
.sinit_num_ostreams
;
3614 status
.sstat_fragmentation_point
= asoc
->frag_point
;
3615 status
.sstat_primary
.spinfo_assoc_id
= sctp_assoc2id(transport
->asoc
);
3616 memcpy(&status
.sstat_primary
.spinfo_address
, &transport
->ipaddr
,
3617 transport
->af_specific
->sockaddr_len
);
3618 /* Map ipv4 address into v4-mapped-on-v6 address. */
3619 sctp_get_pf_specific(sk
->sk_family
)->addr_v4map(sctp_sk(sk
),
3620 (union sctp_addr
*)&status
.sstat_primary
.spinfo_address
);
3621 status
.sstat_primary
.spinfo_state
= transport
->state
;
3622 status
.sstat_primary
.spinfo_cwnd
= transport
->cwnd
;
3623 status
.sstat_primary
.spinfo_srtt
= transport
->srtt
;
3624 status
.sstat_primary
.spinfo_rto
= jiffies_to_msecs(transport
->rto
);
3625 status
.sstat_primary
.spinfo_mtu
= transport
->pathmtu
;
3627 if (status
.sstat_primary
.spinfo_state
== SCTP_UNKNOWN
)
3628 status
.sstat_primary
.spinfo_state
= SCTP_ACTIVE
;
3630 if (put_user(len
, optlen
)) {
3635 SCTP_DEBUG_PRINTK("sctp_getsockopt_sctp_status(%d): %d %d %d\n",
3636 len
, status
.sstat_state
, status
.sstat_rwnd
,
3637 status
.sstat_assoc_id
);
3639 if (copy_to_user(optval
, &status
, len
)) {
3649 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
3651 * Applications can retrieve information about a specific peer address
3652 * of an association, including its reachability state, congestion
3653 * window, and retransmission timer values. This information is
3656 static int sctp_getsockopt_peer_addr_info(struct sock
*sk
, int len
,
3657 char __user
*optval
,
3660 struct sctp_paddrinfo pinfo
;
3661 struct sctp_transport
*transport
;
3664 if (len
< sizeof(pinfo
)) {
3669 len
= sizeof(pinfo
);
3670 if (copy_from_user(&pinfo
, optval
, len
)) {
3675 transport
= sctp_addr_id2transport(sk
, &pinfo
.spinfo_address
,
3676 pinfo
.spinfo_assoc_id
);
3680 pinfo
.spinfo_assoc_id
= sctp_assoc2id(transport
->asoc
);
3681 pinfo
.spinfo_state
= transport
->state
;
3682 pinfo
.spinfo_cwnd
= transport
->cwnd
;
3683 pinfo
.spinfo_srtt
= transport
->srtt
;
3684 pinfo
.spinfo_rto
= jiffies_to_msecs(transport
->rto
);
3685 pinfo
.spinfo_mtu
= transport
->pathmtu
;
3687 if (pinfo
.spinfo_state
== SCTP_UNKNOWN
)
3688 pinfo
.spinfo_state
= SCTP_ACTIVE
;
3690 if (put_user(len
, optlen
)) {
3695 if (copy_to_user(optval
, &pinfo
, len
)) {
3704 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
3706 * This option is a on/off flag. If enabled no SCTP message
3707 * fragmentation will be performed. Instead if a message being sent
3708 * exceeds the current PMTU size, the message will NOT be sent and
3709 * instead an error will be indicated to the user.
static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->disable_fragments == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
3728 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
3730 * This socket option is used to specify various notifications and
3731 * ancillary data the user wishes to receive.
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
				  int __user *optlen)
{
	if (len < sizeof(struct sctp_event_subscribe))
		return -EINVAL;
	len = sizeof(struct sctp_event_subscribe);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
		return -EFAULT;
	return 0;
}
3746 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
3748 * This socket option is applicable to the UDP-style socket only. When
3749 * set it will cause associations that are idle for more than the
3750 * specified number of seconds to automatically close. An association
3751 * being idle is defined as an association that has NOT sent or received
3752 * user data. The special value of '0' indicates that no automatic
3753 * close of any associations should be performed. The option expects an
3754 * integer defining the number of seconds of idle time before an
3755 * association is closed.
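/* Illustrative userspace sketch (not part of this file): reading the
 * current autoclose timeout (seconds) of the UDP-style socket `sd`
 * (placeholder name); TCP-style sockets get EOPNOTSUPP from the handler
 * below:
 *
 *	int secs = 0;
 *	socklen_t len = sizeof(secs);
 *
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, &len) < 0)
 *		perror("getsockopt(SCTP_AUTOCLOSE)");
 */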
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval,
				     int __user *optlen)
{
	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (len < sizeof(int))
		return -EINVAL;
	len = sizeof(int);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
		return -EFAULT;
	return 0;
}
3772 /* Helper routine to branch off an association to a new socket. */
3773 SCTP_STATIC
int sctp_do_peeloff(struct sctp_association
*asoc
,
3774 struct socket
**sockp
)
3776 struct sock
*sk
= asoc
->base
.sk
;
3777 struct socket
*sock
;
3778 struct inet_sock
*inetsk
;
3782 /* An association cannot be branched off from an already peeled-off
3783 * socket, nor is this supported for tcp style sockets.
3785 if (!sctp_style(sk
, UDP
))
3788 /* Create a new socket. */
3789 err
= sock_create(sk
->sk_family
, SOCK_SEQPACKET
, IPPROTO_SCTP
, &sock
);
3793 /* Populate the fields of the newsk from the oldsk and migrate the
3794 * asoc to the newsk.
3796 sctp_sock_migrate(sk
, sock
->sk
, asoc
, SCTP_SOCKET_UDP_HIGH_BANDWIDTH
);
3798 /* Make peeled-off sockets more like 1-1 accepted sockets.
3799 * Set the daddr and initialize id to something more random
3801 af
= sctp_get_af_specific(asoc
->peer
.primary_addr
.sa
.sa_family
);
3802 af
->to_sk_daddr(&asoc
->peer
.primary_addr
, sk
);
3803 inetsk
= inet_sk(sock
->sk
);
3804 inetsk
->id
= asoc
->next_tsn
^ jiffies
;
3811 static int sctp_getsockopt_peeloff(struct sock
*sk
, int len
, char __user
*optval
, int __user
*optlen
)
3813 sctp_peeloff_arg_t peeloff
;
3814 struct socket
*newsock
;
3816 struct sctp_association
*asoc
;
3818 if (len
< sizeof(sctp_peeloff_arg_t
))
3820 len
= sizeof(sctp_peeloff_arg_t
);
3821 if (copy_from_user(&peeloff
, optval
, len
))
3824 asoc
= sctp_id2assoc(sk
, peeloff
.associd
);
3830 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __func__
, sk
, asoc
);
3832 retval
= sctp_do_peeloff(asoc
, &newsock
);
3836 /* Map the socket to an unused fd that can be returned to the user. */
3837 retval
= sock_map_fd(newsock
);
3839 sock_release(newsock
);
3843 SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n",
3844 __func__
, sk
, asoc
, newsock
->sk
, retval
);
3846 /* Return the fd mapped to the new socket. */
3847 peeloff
.sd
= retval
;
3848 if (put_user(len
, optlen
))
3850 if (copy_to_user(optval
, &peeloff
, len
))
3857 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
3859 * Applications can enable or disable heartbeats for any peer address of
3860 * an association, modify an address's heartbeat interval, force a
3861 * heartbeat to be sent immediately, and adjust the address's maximum
3862 * number of retransmissions sent before an address is considered
3863 * unreachable. The following structure is used to access and modify an
3864 * address's parameters:
3866 * struct sctp_paddrparams {
3867 * sctp_assoc_t spp_assoc_id;
3868 * struct sockaddr_storage spp_address;
3869 * uint32_t spp_hbinterval;
3870 * uint16_t spp_pathmaxrxt;
3871 * uint32_t spp_pathmtu;
3872 * uint32_t spp_sackdelay;
3873 * uint32_t spp_flags;
3876 * spp_assoc_id - (one-to-many style socket) This is filled in the
3877 * application, and identifies the association for
3879 * spp_address - This specifies which address is of interest.
3880 * spp_hbinterval - This contains the value of the heartbeat interval,
3881 * in milliseconds. If a value of zero
3882 * is present in this field then no changes are to
3883 * be made to this parameter.
3884 * spp_pathmaxrxt - This contains the maximum number of
3885 * retransmissions before this address shall be
3886 * considered unreachable. If a value of zero
3887 * is present in this field then no changes are to
3888 * be made to this parameter.
3889 * spp_pathmtu - When Path MTU discovery is disabled the value
3890 * specified here will be the "fixed" path mtu.
3891 * Note that if the spp_address field is empty
3892 * then all associations on this address will
3893 * have this fixed path mtu set upon them.
3895 * spp_sackdelay - When delayed sack is enabled, this value specifies
3896 * the number of milliseconds that sacks will be delayed
3897 * for. This value will apply to all addresses of an
3898 * association if the spp_address field is empty. Note
3899 * also, that if delayed sack is enabled and this
3900 * value is set to 0, no change is made to the last
3901 * recorded delayed sack timer value.
3903 * spp_flags - These flags are used to control various features
3904 * on an association. The flag field may contain
3905 * zero or more of the following options.
3907 * SPP_HB_ENABLE - Enable heartbeats on the
3908 * specified address. Note that if the address
3909 * field is empty all addresses for the association
3910 * have heartbeats enabled upon them.
3912 * SPP_HB_DISABLE - Disable heartbeats on the
3913 *                     specified address. Note that if the address
3914 * field is empty all addresses for the association
3915 * will have their heartbeats disabled. Note also
3916 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
3917 * mutually exclusive, only one of these two should
3918 * be specified. Enabling both fields will have
3919 * undetermined results.
3921 * SPP_HB_DEMAND - Request a user initiated heartbeat
3922 * to be made immediately.
3924 * SPP_PMTUD_ENABLE - This field will enable PMTU
3925 * discovery upon the specified address. Note that
3926 *                     if the address field is empty then all addresses
3927 *                     on the association are affected.
3929 * SPP_PMTUD_DISABLE - This field will disable PMTU
3930 * discovery upon the specified address. Note that
3931 *                     if the address field is empty then all addresses
3932 *                     on the association are affected. Note also that
3933 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
3934 * exclusive. Enabling both will have undetermined
3937 * SPP_SACKDELAY_ENABLE - Setting this flag turns
3938 * on delayed sack. The time specified in spp_sackdelay
3939 * is used to specify the sack delay for this address. Note
3940 * that if spp_address is empty then all addresses will
3941 * enable delayed sack and take on the sack delay
3942 * value specified in spp_sackdelay.
3943 * SPP_SACKDELAY_DISABLE - Setting this flag turns
3944 * off delayed sack. If the spp_address field is blank then
3945 * delayed sack is disabled for the entire association. Note
3946 * also that this field is mutually exclusive to
3947 * SPP_SACKDELAY_ENABLE, setting both will have undefined
3950 static int sctp_getsockopt_peer_addr_params(struct sock
*sk
, int len
,
3951 char __user
*optval
, int __user
*optlen
)
3953 struct sctp_paddrparams params
;
3954 struct sctp_transport
*trans
= NULL
;
3955 struct sctp_association
*asoc
= NULL
;
3956 struct sctp_sock
*sp
= sctp_sk(sk
);
3958 if (len
< sizeof(struct sctp_paddrparams
))
3960 len
= sizeof(struct sctp_paddrparams
);
3961 if (copy_from_user(¶ms
, optval
, len
))
3964 /* If an address other than INADDR_ANY is specified, and
3965 * no transport is found, then the request is invalid.
3967 if (!sctp_is_any(( union sctp_addr
*)¶ms
.spp_address
)) {
3968 trans
= sctp_addr_id2transport(sk
, ¶ms
.spp_address
,
3969 params
.spp_assoc_id
);
3971 SCTP_DEBUG_PRINTK("Failed no transport\n");
3976 /* Get association, if assoc_id != 0 and the socket is a one
3977 * to many style socket, and an association was not found, then
3978 * the id was invalid.
3980 asoc
= sctp_id2assoc(sk
, params
.spp_assoc_id
);
3981 if (!asoc
&& params
.spp_assoc_id
&& sctp_style(sk
, UDP
)) {
3982 SCTP_DEBUG_PRINTK("Failed no association\n");
3987 /* Fetch transport values. */
3988 params
.spp_hbinterval
= jiffies_to_msecs(trans
->hbinterval
);
3989 params
.spp_pathmtu
= trans
->pathmtu
;
3990 params
.spp_pathmaxrxt
= trans
->pathmaxrxt
;
3991 params
.spp_sackdelay
= jiffies_to_msecs(trans
->sackdelay
);
3993 /*draft-11 doesn't say what to return in spp_flags*/
3994 params
.spp_flags
= trans
->param_flags
;
3996 /* Fetch association values. */
3997 params
.spp_hbinterval
= jiffies_to_msecs(asoc
->hbinterval
);
3998 params
.spp_pathmtu
= asoc
->pathmtu
;
3999 params
.spp_pathmaxrxt
= asoc
->pathmaxrxt
;
4000 params
.spp_sackdelay
= jiffies_to_msecs(asoc
->sackdelay
);
4002 /*draft-11 doesn't say what to return in spp_flags*/
4003 params
.spp_flags
= asoc
->param_flags
;
4005 /* Fetch socket values. */
4006 params
.spp_hbinterval
= sp
->hbinterval
;
4007 params
.spp_pathmtu
= sp
->pathmtu
;
4008 params
.spp_sackdelay
= sp
->sackdelay
;
4009 params
.spp_pathmaxrxt
= sp
->pathmaxrxt
;
4011 /*draft-11 doesn't say what to return in spp_flags*/
4012 params
.spp_flags
= sp
->param_flags
;
4015 if (copy_to_user(optval
, ¶ms
, len
))
4018 if (put_user(len
, optlen
))
4024 /* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME)
4026 * This option will get or set the delayed ack timer. The time is set
4027 * in milliseconds. If the assoc_id is 0, then this sets or gets the
4028 * endpoint's default delayed ack timer value. If the assoc_id field is
4029 * non-zero, then the set or get affects the specified association.
4031 * struct sctp_assoc_value {
4032 * sctp_assoc_t assoc_id;
4033 * uint32_t assoc_value;
4036 * assoc_id    - This parameter indicates which association the
4037 *               user is performing an action upon. Note that if
4038 *               this field's value is zero then the endpoint's
4039 *               default value is changed (affecting future
4040 * associations only).
4042 * assoc_value - This parameter contains the number of milliseconds
4043 * that the user is requesting the delayed ACK timer
4044 * be set to. Note that this value is defined in
4045 * the standard to be between 200 and 500 milliseconds.
4047 * Note: a value of zero will leave the value alone,
4048 * but disable SACK delay. A non-zero value will also
4049 * enable SACK delay.
4051 static int sctp_getsockopt_delayed_ack_time(struct sock
*sk
, int len
,
4052 char __user
*optval
,
4055 struct sctp_assoc_value params
;
4056 struct sctp_association
*asoc
= NULL
;
4057 struct sctp_sock
*sp
= sctp_sk(sk
);
4059 if (len
< sizeof(struct sctp_assoc_value
))
4062 len
= sizeof(struct sctp_assoc_value
);
4064 if (copy_from_user(¶ms
, optval
, len
))
4067 /* Get association, if assoc_id != 0 and the socket is a one
4068 * to many style socket, and an association was not found, then
4069 * the id was invalid.
4071 asoc
= sctp_id2assoc(sk
, params
.assoc_id
);
4072 if (!asoc
&& params
.assoc_id
&& sctp_style(sk
, UDP
))
4076 /* Fetch association values. */
4077 if (asoc
->param_flags
& SPP_SACKDELAY_ENABLE
)
4078 params
.assoc_value
= jiffies_to_msecs(
4081 params
.assoc_value
= 0;
4083 /* Fetch socket values. */
4084 if (sp
->param_flags
& SPP_SACKDELAY_ENABLE
)
4085 params
.assoc_value
= sp
->sackdelay
;
4087 params
.assoc_value
= 0;
4090 if (copy_to_user(optval
, ¶ms
, len
))
4093 if (put_user(len
, optlen
))
4099 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
4101 * Applications can specify protocol parameters for the default association
4102 * initialization. The option name argument to setsockopt() and getsockopt()
4105 * Setting initialization parameters is effective only on an unconnected
4106 * socket (for UDP-style sockets only future associations are affected
4107 * by the change). With TCP-style sockets, this option is inherited by
4108 * sockets derived from a listener socket.
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	if (len < sizeof(struct sctp_initmsg))
		return -EINVAL;
	len = sizeof(struct sctp_initmsg);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
		return -EFAULT;
	return 0;
}
4122 static int sctp_getsockopt_peer_addrs_num_old(struct sock
*sk
, int len
,
4123 char __user
*optval
,
4127 struct sctp_association
*asoc
;
4128 struct list_head
*pos
;
4131 if (len
< sizeof(sctp_assoc_t
))
4134 if (copy_from_user(&id
, optval
, sizeof(sctp_assoc_t
)))
4137 /* For UDP-style sockets, id specifies the association to query. */
4138 asoc
= sctp_id2assoc(sk
, id
);
4142 list_for_each(pos
, &asoc
->peer
.transport_addr_list
) {
4150 * Old API for getting list of peer addresses. Does not work for 32-bit
4151 * programs running on a 64-bit kernel
4153 static int sctp_getsockopt_peer_addrs_old(struct sock
*sk
, int len
,
4154 char __user
*optval
,
4157 struct sctp_association
*asoc
;
4159 struct sctp_getaddrs_old getaddrs
;
4160 struct sctp_transport
*from
;
4162 union sctp_addr temp
;
4163 struct sctp_sock
*sp
= sctp_sk(sk
);
4166 if (len
< sizeof(struct sctp_getaddrs_old
))
4169 len
= sizeof(struct sctp_getaddrs_old
);
4171 if (copy_from_user(&getaddrs
, optval
, len
))
4174 if (getaddrs
.addr_num
<= 0) return -EINVAL
;
4176 /* For UDP-style sockets, id specifies the association to query. */
4177 asoc
= sctp_id2assoc(sk
, getaddrs
.assoc_id
);
4181 to
= (void __user
*)getaddrs
.addrs
;
4182 list_for_each_entry(from
, &asoc
->peer
.transport_addr_list
,
4184 memcpy(&temp
, &from
->ipaddr
, sizeof(temp
));
4185 sctp_get_pf_specific(sk
->sk_family
)->addr_v4map(sp
, &temp
);
4186 addrlen
= sctp_get_af_specific(sk
->sk_family
)->sockaddr_len
;
4187 if (copy_to_user(to
, &temp
, addrlen
))
4191 if (cnt
>= getaddrs
.addr_num
) break;
4193 getaddrs
.addr_num
= cnt
;
4194 if (put_user(len
, optlen
))
4196 if (copy_to_user(optval
, &getaddrs
, len
))
4202 static int sctp_getsockopt_peer_addrs(struct sock
*sk
, int len
,
4203 char __user
*optval
, int __user
*optlen
)
4205 struct sctp_association
*asoc
;
4207 struct sctp_getaddrs getaddrs
;
4208 struct sctp_transport
*from
;
4210 union sctp_addr temp
;
4211 struct sctp_sock
*sp
= sctp_sk(sk
);
4216 if (len
< sizeof(struct sctp_getaddrs
))
4219 if (copy_from_user(&getaddrs
, optval
, sizeof(struct sctp_getaddrs
)))
4222 /* For UDP-style sockets, id specifies the association to query. */
4223 asoc
= sctp_id2assoc(sk
, getaddrs
.assoc_id
);
4227 to
= optval
+ offsetof(struct sctp_getaddrs
,addrs
);
4228 space_left
= len
- offsetof(struct sctp_getaddrs
,addrs
);
4230 list_for_each_entry(from
, &asoc
->peer
.transport_addr_list
,
4232 memcpy(&temp
, &from
->ipaddr
, sizeof(temp
));
4233 sctp_get_pf_specific(sk
->sk_family
)->addr_v4map(sp
, &temp
);
4234 addrlen
= sctp_get_af_specific(sk
->sk_family
)->sockaddr_len
;
4235 if (space_left
< addrlen
)
4237 if (copy_to_user(to
, &temp
, addrlen
))
4241 space_left
-= addrlen
;
4244 if (put_user(cnt
, &((struct sctp_getaddrs __user
*)optval
)->addr_num
))
4246 bytes_copied
= ((char __user
*)to
) - optval
;
4247 if (put_user(bytes_copied
, optlen
))
static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
					       char __user *optval,
					       int __user *optlen)
{
	sctp_assoc_t id;
	struct sctp_bind_addr *bp;
	struct sctp_association *asoc;
	struct sctp_sockaddr_entry *addr;
	int cnt = 0;

	if (len < sizeof(sctp_assoc_t))
		return -EINVAL;

	if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
		return -EFAULT;

	/*
	 *  For UDP-style sockets, id specifies the association to query.
	 *  If the id field is set to the value '0' then the locally bound
	 *  addresses are returned without regard to any particular
	 *  association.
	 */
	if (0 == id) {
		bp = &sctp_sk(sk)->ep->base.bind_addr;
	} else {
		asoc = sctp_id2assoc(sk, id);
		if (!asoc)
			return -EINVAL;
		bp = &asoc->base.bind_addr;
	}

	/* If the endpoint is bound to 0.0.0.0 or ::0, count the valid
	 * addresses from the global local address list.
	 */
	if (sctp_list_single_entry(&bp->address_list)) {
		addr = list_entry(bp->address_list.next,
				  struct sctp_sockaddr_entry, list);
		if (sctp_is_any(&addr->a)) {
			rcu_read_lock();
			list_for_each_entry_rcu(addr,
						&sctp_local_addr_list, list) {
				if ((PF_INET == sk->sk_family) &&
				    (AF_INET6 == addr->a.sa.sa_family))
					continue;
				cnt++;
			}
			rcu_read_unlock();
			return cnt;
		}
	}

	/* Protection on the bound address list is not needed,
	 * since in the socket option context we hold the socket lock,
	 * so there is no way that the bound address list can change.
	 */
	list_for_each_entry(addr, &bp->address_list, list) {
		cnt++;
	}

	return cnt;
}
/* Helper function that copies local addresses to user and returns the number
 * of addresses copied.
 */
static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
				int max_addrs, void *to,
				int *bytes_copied)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr temp;
	int cnt = 0;
	int addrlen;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
		if ((PF_INET == sk->sk_family) &&
		    (AF_INET6 == addr->a.sa.sa_family))
			continue;
		memcpy(&temp, &addr->a, sizeof(temp));
		if (!temp.v4.sin_port)
			temp.v4.sin_port = htons(port);

		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
								&temp);
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
		memcpy(to, &temp, addrlen);

		to += addrlen;
		*bytes_copied += addrlen;
		cnt++;
		if (cnt >= max_addrs) break;
	}
	rcu_read_unlock();

	return cnt;
}
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
			    size_t space_left, int *bytes_copied)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr temp;
	int cnt = 0;
	int addrlen;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
		if ((PF_INET == sk->sk_family) &&
		    (AF_INET6 == addr->a.sa.sa_family))
			continue;
		memcpy(&temp, &addr->a, sizeof(temp));
		if (!temp.v4.sin_port)
			temp.v4.sin_port = htons(port);

		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
								&temp);
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
		if (space_left < addrlen) {
			cnt = -ENOMEM;
			break;
		}
		memcpy(to, &temp, addrlen);

		to += addrlen;
		cnt++;
		space_left -= addrlen;
		*bytes_copied += addrlen;
	}
	rcu_read_unlock();

	return cnt;
}
/*
 * Old API for getting list of local addresses.  Does not work for 32-bit
 * programs running on a 64-bit kernel.
 */
static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
					   char __user *optval, int __user *optlen)
{
	struct sctp_bind_addr *bp;
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs_old getaddrs;
	struct sctp_sockaddr_entry *addr;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	int err = 0;
	void *addrs;
	void *buf;
	int bytes_copied = 0;

	if (len < sizeof(struct sctp_getaddrs_old))
		return -EINVAL;

	len = sizeof(struct sctp_getaddrs_old);
	if (copy_from_user(&getaddrs, optval, len))
		return -EFAULT;

	if (getaddrs.addr_num <= 0 ||
	    getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr)))
		return -EINVAL;

	/*
	 *  For UDP-style sockets, id specifies the association to query.
	 *  If the id field is set to the value '0' then the locally bound
	 *  addresses are returned without regard to any particular
	 *  association.
	 */
	if (0 == getaddrs.assoc_id) {
		bp = &sctp_sk(sk)->ep->base.bind_addr;
	} else {
		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
		if (!asoc)
			return -EINVAL;
		bp = &asoc->base.bind_addr;
	}

	to = getaddrs.addrs;

	/* Allocate space for a local instance of packed array to hold all
	 * the data.  We store addresses here first and then write them
	 * to the user in one shot.
	 */
	addrs = kmalloc(sizeof(union sctp_addr) * getaddrs.addr_num,
			GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
	 * addresses from the global local address list.
	 */
	if (sctp_list_single_entry(&bp->address_list)) {
		addr = list_entry(bp->address_list.next,
				  struct sctp_sockaddr_entry, list);
		if (sctp_is_any(&addr->a)) {
			cnt = sctp_copy_laddrs_old(sk, bp->port,
						   getaddrs.addr_num,
						   addrs, &bytes_copied);
			goto copy_getaddrs;
		}
	}

	buf = addrs;
	/* Protection on the bound address list is not needed since
	 * in the socket option context we hold a socket lock and
	 * thus the bound address list can't change.
	 */
	list_for_each_entry(addr, &bp->address_list, list) {
		memcpy(&temp, &addr->a, sizeof(temp));
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
		memcpy(buf, &temp, addrlen);
		buf += addrlen;
		bytes_copied += addrlen;
		cnt++;
		if (cnt >= getaddrs.addr_num) break;
	}

copy_getaddrs:
	/* copy the entire address list into the user provided space */
	if (copy_to_user(to, addrs, bytes_copied)) {
		err = -EFAULT;
		goto error;
	}

	/* copy the leading structure back to user */
	getaddrs.addr_num = cnt;
	if (copy_to_user(optval, &getaddrs, len))
		err = -EFAULT;

error:
	kfree(addrs);
	return err;
}
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	struct sctp_bind_addr *bp;
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_sockaddr_entry *addr;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	int err = 0;
	size_t space_left;
	int bytes_copied = 0;
	void *addrs;
	void *buf;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/*
	 *  For UDP-style sockets, id specifies the association to query.
	 *  If the id field is set to the value '0' then the locally bound
	 *  addresses are returned without regard to any particular
	 *  association.
	 */
	if (0 == getaddrs.assoc_id) {
		bp = &sctp_sk(sk)->ep->base.bind_addr;
	} else {
		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
		if (!asoc)
			return -EINVAL;
		bp = &asoc->base.bind_addr;
	}

	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	addrs = kmalloc(space_left, GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
	 * addresses from the global local address list.
	 */
	if (sctp_list_single_entry(&bp->address_list)) {
		addr = list_entry(bp->address_list.next,
				  struct sctp_sockaddr_entry, list);
		if (sctp_is_any(&addr->a)) {
			cnt = sctp_copy_laddrs(sk, bp->port, addrs,
					       space_left, &bytes_copied);
			if (cnt < 0) {
				err = cnt;
				goto out;
			}
			goto copy_getaddrs;
		}
	}

	buf = addrs;
	/* Protection on the bound address list is not needed since
	 * in the socket option context we hold a socket lock and
	 * thus the bound address list can't change.
	 */
	list_for_each_entry(addr, &bp->address_list, list) {
		memcpy(&temp, &addr->a, sizeof(temp));
		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
		if (space_left < addrlen) {
			err = -ENOMEM; /*fixme: right error?*/
			goto out;
		}
		memcpy(buf, &temp, addrlen);
		buf += addrlen;
		bytes_copied += addrlen;
		cnt++;
		space_left -= addrlen;
	}

copy_getaddrs:
	if (copy_to_user(to, addrs, bytes_copied)) {
		err = -EFAULT;
		goto out;
	}
	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
		err = -EFAULT;
		goto out;
	}
	if (put_user(bytes_copied, optlen))
		err = -EFAULT;
out:
	kfree(addrs);
	return err;
}
/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
 *
 * Requests that the local SCTP stack use the enclosed peer address as
 * the association primary.  The enclosed address must be one of the
 * association peer's addresses.
 */
static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	struct sctp_prim prim;
	struct sctp_association *asoc;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_prim))
		return -EINVAL;

	len = sizeof(struct sctp_prim);

	if (copy_from_user(&prim, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.primary_path)
		return -ENOTCONN;

	memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
		asoc->peer.primary_path->af_specific->sockaddr_len);

	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
			(union sctp_addr *)&prim.ssp_addr);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &prim, len))
		return -EFAULT;

	return 0;
}
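
/*
 * Illustrative userspace sketch (not part of this file): querying the
 * current primary path through SCTP_PRIMARY_ADDR.  ssp_assoc_id selects
 * the association on a one-to-many socket; the kernel fills in ssp_addr.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int get_primary(int fd, sctp_assoc_t id, struct sctp_prim *prim)
 *	{
 *		socklen_t len = sizeof(*prim);
 *
 *		memset(prim, 0, sizeof(*prim));
 *		prim->ssp_assoc_id = id;
 *		return getsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
 *				  prim, &len);
 *	}
 */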
/*
 * 7.1.11  Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
 *
 * Requests that the local endpoint set the specified Adaptation Layer
 * Indication parameter for all future INIT and INIT-ACK exchanges.
 */
static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
					    char __user *optval, int __user *optlen)
{
	struct sctp_setadaptation adaptation;

	if (len < sizeof(struct sctp_setadaptation))
		return -EINVAL;

	len = sizeof(struct sctp_setadaptation);

	adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &adaptation, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 *   Applications that wish to use the sendto() system call may wish to
 *   specify a default set of parameters that would normally be supplied
 *   through the inclusion of ancillary data.  This socket option allows
 *   such an application to set the default sctp_sndrcvinfo structure.
 *
 *   The application that wishes to use this socket option simply passes
 *   in to this call the sctp_sndrcvinfo structure defined in Section
 *   5.2.2.  The input parameters accepted by this call include
 *   sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 *   sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
 *   to this call if the caller is using the UDP model.
 *
 *   For getsockopt, it gets the default sctp_sndrcvinfo structure.
 */
static int sctp_getsockopt_default_send_param(struct sock *sk,
					      int len, char __user *optval,
					      int __user *optlen)
{
	struct sctp_sndrcvinfo info;
	struct sctp_association *asoc;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_sndrcvinfo))
		return -EINVAL;

	len = sizeof(struct sctp_sndrcvinfo);

	if (copy_from_user(&info, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		info.sinfo_stream = asoc->default_stream;
		info.sinfo_flags = asoc->default_flags;
		info.sinfo_ppid = asoc->default_ppid;
		info.sinfo_context = asoc->default_context;
		info.sinfo_timetolive = asoc->default_timetolive;
	} else {
		info.sinfo_stream = sp->default_stream;
		info.sinfo_flags = sp->default_flags;
		info.sinfo_ppid = sp->default_ppid;
		info.sinfo_context = sp->default_context;
		info.sinfo_timetolive = sp->default_timetolive;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &info, len))
		return -EFAULT;

	return 0;
}
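
/*
 * Illustrative userspace sketch (not part of this file): reading and then
 * adjusting the defaults handled above.  On a one-to-many socket,
 * sinfo_assoc_id must name the association of interest.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int set_default_stream(int fd, sctp_assoc_t id, uint16_t stream)
 *	{
 *		struct sctp_sndrcvinfo info;
 *		socklen_t len = sizeof(info);
 *
 *		memset(&info, 0, sizeof(info));
 *		info.sinfo_assoc_id = id;
 *		if (getsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *			       &info, &len) < 0)
 *			return -1;
 *		info.sinfo_stream = stream;
 *		return setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *				  &info, sizeof(info));
 *	}
 */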
/* 7.1.5 SCTP_NODELAY
 *
 * Turn on/off any Nagle-like algorithm.  This means that packets are
 * generally sent as soon as possible and no unnecessary delays are
 * introduced, at the cost of more packets in the network.  Expects an
 * integer boolean flag.
 */
static int sctp_getsockopt_nodelay(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->nodelay == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
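
/*
 * Illustrative userspace sketch (not part of this file): disabling the
 * Nagle-like delay, mirroring the familiar TCP_NODELAY usage.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int sctp_set_nodelay(int fd, int on)
 *	{
 *		return setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY,
 *				  &on, sizeof(on));
 *	}
 */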
/*
 * 7.1.1 SCTP_RTOINFO
 *
 * The protocol parameters used to initialize and bound retransmission
 * timeout (RTO) are tunable.  The sctp_rtoinfo structure is used to access
 * and modify these parameters.
 * All parameters are time values, in milliseconds.  A value of 0, when
 * modifying the parameters, indicates that the current value should not
 * be changed.
 */
static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
				   char __user *optval,
				   int __user *optlen) {
	struct sctp_rtoinfo rtoinfo;
	struct sctp_association *asoc;

	if (len < sizeof (struct sctp_rtoinfo))
		return -EINVAL;

	len = sizeof(struct sctp_rtoinfo);

	if (copy_from_user(&rtoinfo, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);

	if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Values corresponding to the specific association. */
	if (asoc) {
		rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
		rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
		rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
	} else {
		/* Values corresponding to the endpoint. */
		struct sctp_sock *sp = sctp_sk(sk);

		rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
		rtoinfo.srto_max = sp->rtoinfo.srto_max;
		rtoinfo.srto_min = sp->rtoinfo.srto_min;
	}

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &rtoinfo, len))
		return -EFAULT;

	return 0;
}
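
/*
 * Illustrative userspace sketch (not part of this file): fetching the RTO
 * bounds in milliseconds, as converted by the handler above.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static void show_rto(int fd, sctp_assoc_t id)
 *	{
 *		struct sctp_rtoinfo rto;
 *		socklen_t len = sizeof(rto);
 *
 *		memset(&rto, 0, sizeof(rto));
 *		rto.srto_assoc_id = id;
 *		if (getsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &len) < 0)
 *			return;
 *		printf("rto: initial=%u min=%u max=%u (ms)\n",
 *		       rto.srto_initial, rto.srto_min, rto.srto_max);
 *	}
 */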
/*
 * 7.1.2 SCTP_ASSOCINFO
 *
 * This option is used to tune the maximum retransmission attempts
 * of the association.
 * Returns an error if the new association retransmission value is
 * greater than the sum of the retransmission value of the peer.
 * See [SCTP] for more information.
 */
static int sctp_getsockopt_associnfo(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_assocparams assocparams;
	struct sctp_association *asoc;
	struct list_head *pos;
	int cnt = 0;

	if (len < sizeof (struct sctp_assocparams))
		return -EINVAL;

	len = sizeof(struct sctp_assocparams);

	if (copy_from_user(&assocparams, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);

	if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Values corresponding to the specific association */
	if (asoc) {
		assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
		assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
		assocparams.sasoc_local_rwnd = asoc->a_rwnd;
		assocparams.sasoc_cookie_life = (asoc->cookie_life.tv_sec
						* 1000) +
						(asoc->cookie_life.tv_usec
						/ 1000);

		list_for_each(pos, &asoc->peer.transport_addr_list) {
			cnt++;
		}

		assocparams.sasoc_number_peer_destinations = cnt;
	} else {
		/* Values corresponding to the endpoint */
		struct sctp_sock *sp = sctp_sk(sk);

		assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
		assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
		assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
		assocparams.sasoc_cookie_life =
					sp->assocparams.sasoc_cookie_life;
		assocparams.sasoc_number_peer_destinations =
					sp->assocparams.
					sasoc_number_peer_destinations;
	}

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &assocparams, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
 *
 * This socket option is a boolean flag which turns on or off mapped V4
 * addresses.  If this option is turned on and the socket is type
 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
 * If this option is turned off, then no mapping will be done of V4
 * addresses and a user will receive both PF_INET6 and PF_INET type
 * addresses on the socket.
 */
static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = sp->v4mapped;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.29.  Set or Get the default context (SCTP_CONTEXT)
 * (chapter and verse is quoted at sctp_setsockopt_context())
 */
static int sctp_getsockopt_context(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_assoc_value))
		return -EINVAL;

	len = sizeof(struct sctp_assoc_value);

	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		params.assoc_value = asoc->default_rcv_context;
	} else {
		params.assoc_value = sp->default_rcv_context;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.17 Set the maximum fragmentation size (SCTP_MAXSEG)
 *
 * This socket option specifies the maximum size to put in any outgoing
 * SCTP chunk.  If a message is larger than this size it will be
 * fragmented by SCTP into the specified size.  Note that the underlying
 * SCTP implementation may fragment into smaller sized chunks when the
 * PMTU of the underlying association is smaller than the value set by
 * the user.
 */
static int sctp_getsockopt_maxseg(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);

	val = sctp_sk(sk)->user_frag;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
 */
static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
					       char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);

	val = sctp_sk(sk)->frag_interleave;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.25.  Set or Get the sctp partial delivery point
 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
 */
static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
						  char __user *optval,
						  int __user *optlen)
{
	u32 val;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	val = sctp_sk(sk)->pd_point;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/*
 * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
 */
static int sctp_getsockopt_maxburst(struct sock *sk, int len,
				    char __user *optval,
				    int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (len < sizeof(int))
		return -EINVAL;

	if (len == sizeof(int)) {
		printk(KERN_WARNING
		   "SCTP: Use of int in max_burst socket option deprecated\n");
		printk(KERN_WARNING
		   "SCTP: Use struct sctp_assoc_value instead\n");
		params.assoc_id = 0;
	} else if (len == sizeof (struct sctp_assoc_value)) {
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		params.assoc_value = asoc->max_burst;
	} else
		params.assoc_value = sp->max_burst;

	if (len == sizeof(int)) {
		if (copy_to_user(optval, &params.assoc_value, len))
			return -EFAULT;
	} else {
		if (copy_to_user(optval, &params, len))
			return -EFAULT;
	}

	return 0;
}
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_hmacalgo __user *p = (void __user *)optval;
	struct sctp_hmac_algo_param *hmacs;
	__u16 data_len = 0;
	u32 num_idents;

	if (!sctp_auth_enable)
		return -EACCES;

	hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);

	if (len < sizeof(struct sctp_hmacalgo) + data_len)
		return -EINVAL;

	len = sizeof(struct sctp_hmacalgo) + data_len;
	num_idents = data_len / sizeof(u16);

	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_idents, &p->shmac_num_idents))
		return -EFAULT;
	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
		return -EFAULT;
	return 0;
}
static int sctp_getsockopt_active_key(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!sctp_auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		val.scact_keynumber = asoc->active_key_id;
	else
		val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;

	len = sizeof(struct sctp_authkeyid);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
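
/*
 * Illustrative userspace sketch (not part of this file): reading the
 * currently active SCTP-AUTH key number.  If SCTP authentication is not
 * enabled (net.sctp.auth_enable), the handler above returns -EACCES.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int get_active_key(int fd, sctp_assoc_t id, uint16_t *keynum)
 *	{
 *		struct sctp_authkeyid key;
 *		socklen_t len = sizeof(key);
 *
 *		memset(&key, 0, sizeof(key));
 *		key.scact_assoc_id = id;
 *		if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
 *			       &key, &len) < 0)
 *			return -1;
 *		*keynum = key.scact_keynumber;
 *		return 0;
 *	}
 */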
static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
					    char __user *optval, int __user *optlen)
{
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32 num_chunks = 0;
	char __user *to;

	if (!sctp_auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc)
		return -EINVAL;

	ch = asoc->peer.peer_chunks;
	if (!ch)
		goto num;

	/* See if the user provided enough room for all the data */
	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
	if (len < num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen)) return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;
	return 0;
}
static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
					     char __user *optval, int __user *optlen)
{
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32 num_chunks = 0;
	char __user *to;

	if (!sctp_auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
	else
		ch = sctp_sk(sk)->ep->auth_chunk_list;

	if (!ch)
		goto num;

	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
	if (len < sizeof(struct sctp_authchunks) + num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;
	return 0;
}
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
				char __user *optval, int __user *optlen)
{
	int retval = 0;
	int len;

	SCTP_DEBUG_PRINTK("sctp_getsockopt(sk: %p... optname: %d)\n",
			  sk, optname);

	/* I can hardly begin to describe how wrong this is.  This is
	 * so broken as to be worse than useless.  The API draft
	 * REALLY is NOT helpful here...  I am not convinced that the
	 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
	 * are at all well-founded.
	 */
	if (level != SOL_SCTP) {
		struct sctp_af *af = sctp_sk(sk)->pf->af;

		retval = af->getsockopt(sk, level, optname, optval, optlen);
		return retval;
	}

	if (get_user(len, optlen))
		return -EFAULT;

	sctp_lock_sock(sk);

	switch (optname) {
	case SCTP_STATUS:
		retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
		break;
	case SCTP_DISABLE_FRAGMENTS:
		retval = sctp_getsockopt_disable_fragments(sk, len, optval,
							   optlen);
		break;
	case SCTP_EVENTS:
		retval = sctp_getsockopt_events(sk, len, optval, optlen);
		break;
	case SCTP_AUTOCLOSE:
		retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
		break;
	case SCTP_SOCKOPT_PEELOFF:
		retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
		break;
	case SCTP_PEER_ADDR_PARAMS:
		retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
							  optlen);
		break;
	case SCTP_DELAYED_ACK_TIME:
		retval = sctp_getsockopt_delayed_ack_time(sk, len, optval,
							  optlen);
		break;
	case SCTP_INITMSG:
		retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
		break;
	case SCTP_GET_PEER_ADDRS_NUM_OLD:
		retval = sctp_getsockopt_peer_addrs_num_old(sk, len, optval,
							    optlen);
		break;
	case SCTP_GET_LOCAL_ADDRS_NUM_OLD:
		retval = sctp_getsockopt_local_addrs_num_old(sk, len, optval,
							     optlen);
		break;
	case SCTP_GET_PEER_ADDRS_OLD:
		retval = sctp_getsockopt_peer_addrs_old(sk, len, optval,
							optlen);
		break;
	case SCTP_GET_LOCAL_ADDRS_OLD:
		retval = sctp_getsockopt_local_addrs_old(sk, len, optval,
							 optlen);
		break;
	case SCTP_GET_PEER_ADDRS:
		retval = sctp_getsockopt_peer_addrs(sk, len, optval,
						    optlen);
		break;
	case SCTP_GET_LOCAL_ADDRS:
		retval = sctp_getsockopt_local_addrs(sk, len, optval,
						     optlen);
		break;
	case SCTP_DEFAULT_SEND_PARAM:
		retval = sctp_getsockopt_default_send_param(sk, len,
							    optval, optlen);
		break;
	case SCTP_PRIMARY_ADDR:
		retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
		break;
	case SCTP_NODELAY:
		retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
		break;
	case SCTP_RTOINFO:
		retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
		break;
	case SCTP_ASSOCINFO:
		retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
		break;
	case SCTP_I_WANT_MAPPED_V4_ADDR:
		retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
		break;
	case SCTP_MAXSEG:
		retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
		break;
	case SCTP_GET_PEER_ADDR_INFO:
		retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
							optlen);
		break;
	case SCTP_ADAPTATION_LAYER:
		retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
							  optlen);
		break;
	case SCTP_CONTEXT:
		retval = sctp_getsockopt_context(sk, len, optval, optlen);
		break;
	case SCTP_FRAGMENT_INTERLEAVE:
		retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
							     optlen);
		break;
	case SCTP_PARTIAL_DELIVERY_POINT:
		retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
								optlen);
		break;
	case SCTP_MAX_BURST:
		retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
		break;
	case SCTP_AUTH_KEY:
	case SCTP_AUTH_CHUNK:
	case SCTP_AUTH_DELETE_KEY:
		retval = -EOPNOTSUPP;
		break;
	case SCTP_HMAC_IDENT:
		retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
		break;
	case SCTP_AUTH_ACTIVE_KEY:
		retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
		break;
	case SCTP_PEER_AUTH_CHUNKS:
		retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
							  optlen);
		break;
	case SCTP_LOCAL_AUTH_CHUNKS:
		retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
							   optlen);
		break;
	default:
		retval = -ENOPROTOOPT;
		break;
	}

	sctp_release_sock(sk);
	return retval;
}
static void sctp_hash(struct sock *sk)
{
	/* STUB */
}

static void sctp_unhash(struct sock *sk)
{
	/* STUB */
}
5388 /* Check if port is acceptable. Possibly find first available port.
5390 * The port hash table (contained in the 'global' SCTP protocol storage
5391 * returned by struct sctp_protocol *sctp_get_protocol()). The hash
5392 * table is an array of 4096 lists (sctp_bind_hashbucket). Each
5393 * list (the list number is the port number hashed out, so as you
5394 * would expect from a hash function, all the ports in a given list have
5395 * such a number that hashes out to the same list number; you were
5396 * expecting that, right?); so each list has a set of ports, with a
5397 * link to the socket (struct sock) that uses it, the port number and
5398 * a fastreuse flag (FIXME: NPI ipg).
5400 static struct sctp_bind_bucket
*sctp_bucket_create(
5401 struct sctp_bind_hashbucket
*head
, unsigned short snum
);
5403 static long sctp_get_port_local(struct sock
*sk
, union sctp_addr
*addr
)
5405 struct sctp_bind_hashbucket
*head
; /* hash list */
5406 struct sctp_bind_bucket
*pp
; /* hash list port iterator */
5407 struct hlist_node
*node
;
5408 unsigned short snum
;
5411 snum
= ntohs(addr
->v4
.sin_port
);
5413 SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum
);
5414 sctp_local_bh_disable();
5417 /* Search for an available port. */
5418 int low
, high
, remaining
, index
;
5421 inet_get_local_port_range(&low
, &high
);
5422 remaining
= (high
- low
) + 1;
5423 rover
= net_random() % remaining
+ low
;
5427 if ((rover
< low
) || (rover
> high
))
5429 index
= sctp_phashfn(rover
);
5430 head
= &sctp_port_hashtable
[index
];
5431 sctp_spin_lock(&head
->lock
);
5432 sctp_for_each_hentry(pp
, node
, &head
->chain
)
5433 if (pp
->port
== rover
)
5437 sctp_spin_unlock(&head
->lock
);
5438 } while (--remaining
> 0);
5440 /* Exhausted local port range during search? */
5445 /* OK, here is the one we will use. HEAD (the port
5446 * hash table list entry) is non-NULL and we hold it's
5451 /* We are given an specific port number; we verify
5452 * that it is not being used. If it is used, we will
5453 * exahust the search in the hash list corresponding
5454 * to the port number (snum) - we detect that with the
5455 * port iterator, pp being NULL.
5457 head
= &sctp_port_hashtable
[sctp_phashfn(snum
)];
5458 sctp_spin_lock(&head
->lock
);
5459 sctp_for_each_hentry(pp
, node
, &head
->chain
) {
5460 if (pp
->port
== snum
)
5467 if (!hlist_empty(&pp
->owner
)) {
5468 /* We had a port hash table hit - there is an
5469 * available port (pp != NULL) and it is being
5470 * used by other socket (pp->owner not empty); that other
5471 * socket is going to be sk2.
5473 int reuse
= sk
->sk_reuse
;
5475 struct hlist_node
*node
;
5477 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
5478 if (pp
->fastreuse
&& sk
->sk_reuse
&&
5479 sk
->sk_state
!= SCTP_SS_LISTENING
)
5482 /* Run through the list of sockets bound to the port
5483 * (pp->port) [via the pointers bind_next and
5484 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
5485 * we get the endpoint they describe and run through
5486 * the endpoint's list of IP (v4 or v6) addresses,
5487 * comparing each of the addresses with the address of
5488 * the socket sk. If we find a match, then that means
5489 * that this port/socket (sk) combination are already
5492 sk_for_each_bound(sk2
, node
, &pp
->owner
) {
5493 struct sctp_endpoint
*ep2
;
5494 ep2
= sctp_sk(sk2
)->ep
;
5496 if (reuse
&& sk2
->sk_reuse
&&
5497 sk2
->sk_state
!= SCTP_SS_LISTENING
)
5500 if (sctp_bind_addr_match(&ep2
->base
.bind_addr
, addr
,
5506 SCTP_DEBUG_PRINTK("sctp_get_port(): Found a match\n");
5509 /* If there was a hash table miss, create a new port. */
5511 if (!pp
&& !(pp
= sctp_bucket_create(head
, snum
)))
5514 /* In either case (hit or miss), make sure fastreuse is 1 only
5515 * if sk->sk_reuse is too (that is, if the caller requested
5516 * SO_REUSEADDR on this socket -sk-).
5518 if (hlist_empty(&pp
->owner
)) {
5519 if (sk
->sk_reuse
&& sk
->sk_state
!= SCTP_SS_LISTENING
)
5523 } else if (pp
->fastreuse
&&
5524 (!sk
->sk_reuse
|| sk
->sk_state
== SCTP_SS_LISTENING
))
5527 /* We are set, so fill up all the data in the hash table
5528 * entry, tie the socket list information with the rest of the
5529 * sockets FIXME: Blurry, NPI (ipg).
5532 if (!sctp_sk(sk
)->bind_hash
) {
5533 inet_sk(sk
)->num
= snum
;
5534 sk_add_bind_node(sk
, &pp
->owner
);
5535 sctp_sk(sk
)->bind_hash
= pp
;
5540 sctp_spin_unlock(&head
->lock
);
5543 sctp_local_bh_enable();
/* Assign a 'snum' port to the socket.  If snum == 0, an ephemeral
 * port is requested.
 */
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
	long ret;
	union sctp_addr addr;
	struct sctp_af *af = sctp_sk(sk)->pf->af;

	/* Set up a dummy address struct from the sk. */
	af->from_sk(&addr, sk);
	addr.v4.sin_port = htons(snum);

	/* Note: sk->sk_num gets filled in if ephemeral port request. */
	ret = sctp_get_port_local(sk, &addr);

	return (ret ? 1 : 0);
}
/*
 *  3.1.3 listen() - UDP Style Syntax
 *
 *  By default, new associations are not accepted for UDP style sockets.
 *  An application uses listen() to mark a socket as being able to
 *  accept new associations.
 */
SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;

	/* Only UDP style sockets that are not peeled off are allowed to
	 * listen().
	 */
	if (!sctp_style(sk, UDP))
		return -EINVAL;

	/* If backlog is zero, disable listening. */
	if (!backlog) {
		if (sctp_sstate(sk, CLOSED))
			return 0;

		sctp_unhash_endpoint(ep);
		sk->sk_state = SCTP_SS_CLOSED;
		return 0;
	}

	/* Return if we are already listening. */
	if (sctp_sstate(sk, LISTENING))
		return 0;

	/*
	 * If a bind() or sctp_bindx() is not called prior to a listen()
	 * call that allows new associations to be accepted, the system
	 * picks an ephemeral port and will choose an address set equivalent
	 * to binding with a wildcard address.
	 *
	 * This is not currently spelled out in the SCTP sockets
	 * extensions draft, but follows the practice as seen in TCP
	 * sockets.
	 *
	 * Additionally, turn off the fastreuse flag since we are not listening
	 * on a wildcard port any more.
	 */
	sk->sk_state = SCTP_SS_LISTENING;
	if (!ep->base.bind_addr.port) {
		if (sctp_autobind(sk))
			return -EAGAIN;
	} else
		sctp_sk(sk)->bind_hash->fastreuse = 0;

	sctp_hash_endpoint(ep);
	return 0;
}
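
/*
 * Illustrative userspace sketch (not part of this file): the UDP-style
 * listen path above is reached from a one-to-many (SOCK_SEQPACKET)
 * socket.  Compile separately as a userspace program.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int one_to_many_server(uint16_t port)
 *	{
 *		struct sockaddr_in sin;
 *		int fd;
 *
 *		fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *		if (fd < 0)
 *			return -1;
 *		memset(&sin, 0, sizeof(sin));
 *		sin.sin_family = AF_INET;
 *		sin.sin_port = htons(port);
 *		sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *		if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
 *		    listen(fd, 1) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 *
 * Once listen() succeeds, new associations are accepted on the one
 * descriptor; there is no accept() step for the UDP-style socket.
 */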
/*
 * 4.1.3 listen() - TCP Style Syntax
 *
 * An application uses listen() to ready the SCTP endpoint for accepting
 * inbound associations.
 */
SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;

	/* If backlog is zero, disable listening. */
	if (!backlog) {
		if (sctp_sstate(sk, CLOSED))
			return 0;

		sctp_unhash_endpoint(ep);
		sk->sk_state = SCTP_SS_CLOSED;
		return 0;
	}

	if (sctp_sstate(sk, LISTENING))
		return 0;

	/*
	 * If a bind() or sctp_bindx() is not called prior to a listen()
	 * call that allows new associations to be accepted, the system
	 * picks an ephemeral port and will choose an address set equivalent
	 * to binding with a wildcard address.
	 *
	 * This is not currently spelled out in the SCTP sockets
	 * extensions draft, but follows the practice as seen in TCP
	 * sockets.
	 */
	sk->sk_state = SCTP_SS_LISTENING;
	if (!ep->base.bind_addr.port) {
		if (sctp_autobind(sk))
			return -EAGAIN;
	} else
		sctp_sk(sk)->bind_hash->fastreuse = 0;

	sk->sk_max_ack_backlog = backlog;
	sctp_hash_endpoint(ep);
	return 0;
}
/*
 *  Move a socket to LISTENING state.
 */
int sctp_inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct crypto_hash *tfm = NULL;
	int err = -EINVAL;

	if (unlikely(backlog < 0))
		return err;

	sctp_lock_sock(sk);

	if (sock->state != SS_UNCONNECTED)
		goto out;

	/* Allocate HMAC for generating cookie. */
	if (sctp_hmac_alg) {
		tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			if (net_ratelimit()) {
				printk(KERN_INFO
				       "SCTP: failed to load transform for %s: %ld\n",
					sctp_hmac_alg, PTR_ERR(tfm));
			}
			err = -ENOSYS;
			goto out;
		}
	}

	switch (sock->type) {
	case SOCK_SEQPACKET:
		err = sctp_seqpacket_listen(sk, backlog);
		break;
	case SOCK_STREAM:
		err = sctp_stream_listen(sk, backlog);
		break;
	default:
		break;
	}

	if (err)
		goto cleanup;

	/* Store away the transform reference. */
	sctp_sk(sk)->hmac = tfm;
out:
	sctp_release_sock(sk);
	return err;
cleanup:
	crypto_free_hash(tfm);
	goto out;
}
/*
 * This function is done by modeling the current datagram_poll() and the
 * tcp_poll().  Note that, based on these implementations, we don't
 * lock the socket in this function, even though it seems that,
 * ideally, locking or some other mechanisms can be used to ensure
 * the integrity of the counters (sndbuf and wmem_alloc) used
 * in this place.  We assume that we don't need locks either until proven
 * otherwise.
 *
 * Another thing to note is that we include the Async I/O support
 * here, again, by modeling the current TCP/UDP code.  We don't have
 * a good way to test with it yet.
 */
unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct sctp_sock *sp = sctp_sk(sk);
	unsigned int mask;

	poll_wait(file, sk->sk_sleep, wait);

	/* A TCP-style listening socket becomes readable when the accept queue
	 * is not empty.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		return (!list_empty(&sp->ep->asocs)) ?
			(POLLIN | POLLRDNORM) : 0;

	mask = 0;

	/* Is there any exceptional events?  */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* Is it readable?  Reconsider this code with TCP-style support.  */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* The association is either gone or not ready.  */
	if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
		return mask;

	/* Is it writable?  */
	if (sctp_writeable(sk)) {
		mask |= POLLOUT | POLLWRNORM;
	} else {
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		/*
		 * Since the socket is not locked, the buffer
		 * might be made available after the writeable check and
		 * before the bit is set.  This could cause a lost I/O
		 * signal.  tcp_poll() has a race breaker for this race
		 * condition.  Based on their implementation, we put
		 * in the following code to cover it as well.
		 */
		if (sctp_writeable(sk))
			mask |= POLLOUT | POLLWRNORM;
	}
	return mask;
}
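
/*
 * Illustrative userspace sketch (not part of this file): what sctp_poll()
 * above services on the kernel side -- an ordinary poll() call on an SCTP
 * socket descriptor.
 *
 *	#include <poll.h>
 *
 *	static int wait_readable(int fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		int n = poll(&pfd, 1, timeout_ms);
 *
 *		if (n <= 0)
 *			return n;
 *		return (pfd.revents & POLLIN) ? 1 : 0;
 *	}
 */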
5788 /********************************************************************
5789 * 2nd Level Abstractions
5790 ********************************************************************/
5792 static struct sctp_bind_bucket
*sctp_bucket_create(
5793 struct sctp_bind_hashbucket
*head
, unsigned short snum
)
5795 struct sctp_bind_bucket
*pp
;
5797 pp
= kmem_cache_alloc(sctp_bucket_cachep
, GFP_ATOMIC
);
5799 SCTP_DBG_OBJCNT_INC(bind_bucket
);
5802 INIT_HLIST_HEAD(&pp
->owner
);
5803 hlist_add_head(&pp
->node
, &head
->chain
);
5808 /* Caller must hold hashbucket lock for this tb with local BH disabled */
5809 static void sctp_bucket_destroy(struct sctp_bind_bucket
*pp
)
5811 if (pp
&& hlist_empty(&pp
->owner
)) {
5812 __hlist_del(&pp
->node
);
5813 kmem_cache_free(sctp_bucket_cachep
, pp
);
5814 SCTP_DBG_OBJCNT_DEC(bind_bucket
);
5818 /* Release this socket's reference to a local port. */
5819 static inline void __sctp_put_port(struct sock
*sk
)
5821 struct sctp_bind_hashbucket
*head
=
5822 &sctp_port_hashtable
[sctp_phashfn(inet_sk(sk
)->num
)];
5823 struct sctp_bind_bucket
*pp
;
5825 sctp_spin_lock(&head
->lock
);
5826 pp
= sctp_sk(sk
)->bind_hash
;
5827 __sk_del_bind_node(sk
);
5828 sctp_sk(sk
)->bind_hash
= NULL
;
5829 inet_sk(sk
)->num
= 0;
5830 sctp_bucket_destroy(pp
);
5831 sctp_spin_unlock(&head
->lock
);
5834 void sctp_put_port(struct sock
*sk
)
5836 sctp_local_bh_disable();
5837 __sctp_put_port(sk
);
5838 sctp_local_bh_enable();
/*
 * The system picks an ephemeral port and chooses an address set equivalent
 * to binding with a wildcard address.
 * One of those addresses will be the primary address for the association.
 * This automatically enables the multihoming capability of SCTP.
 */
static int sctp_autobind(struct sock *sk)
{
	union sctp_addr autoaddr;
	struct sctp_af *af;
	__be16 port;

	/* Initialize a local sockaddr structure to INADDR_ANY. */
	af = sctp_sk(sk)->pf->af;

	port = htons(inet_sk(sk)->num);
	af->inaddr_any(&autoaddr, port);

	return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
}
/* Parse out IPPROTO_SCTP CMSG headers.  Perform only minimal validation.
 *
 * 4.2 The cmsghdr Structure
 *
 * When ancillary data is sent or received, any number of ancillary data
 * objects can be specified by the msg_control and msg_controllen members of
 * the msghdr structure, because each object is preceded by
 * a cmsghdr structure defining the object's length (the cmsg_len member).
 * Historically Berkeley-derived implementations have passed only one object
 * at a time, but this API allows multiple objects to be
 * passed in a single call to sendmsg() or recvmsg().  The following example
 * shows two ancillary data objects in a control buffer.
 *
 *   |<--------------------------- msg_controllen -------------------------->|
 *   |                                                                       |
 *   |<----- ancillary data object ----->|<----- ancillary data object ----->|
 *   |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
 *   |                                   |                                   |
 *   |<---------- cmsg_len ---------->|  |<--------- cmsg_len ----------->|  |
 *   |<--------- CMSG_LEN() --------->|  |<-------- CMSG_LEN() ---------->|  |
 *   |                                |  |                                |  |
 *   +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 *   |cmsg_|cmsg_|cmsg_|XX|           |XX|cmsg_|cmsg_|cmsg_|XX|           |XX|
 *   |len  |level|type |XX|cmsg_data[]|XX|len  |level|type |XX|cmsg_data[]|XX|
 *   +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 *
 * msg_control points at the first cmsghdr in the buffer.
 */
SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
				  sctp_cmsgs_t *cmsgs)
{
	struct cmsghdr *cmsg;
	struct msghdr *my_msg = (struct msghdr *)msg;

	for (cmsg = CMSG_FIRSTHDR(msg);
	     cmsg != NULL;
	     cmsg = CMSG_NXTHDR(my_msg, cmsg)) {
		if (!CMSG_OK(my_msg, cmsg))
			return -EINVAL;

		/* Should we parse this header or ignore?  */
		if (cmsg->cmsg_level != IPPROTO_SCTP)
			continue;

		/* Strictly check lengths following example in SCM code.  */
		switch (cmsg->cmsg_type) {
		case SCTP_INIT:
			/* SCTP Socket API Extension
			 * 5.2.1 SCTP Initiation Structure (SCTP_INIT)
			 *
			 * This cmsghdr structure provides information for
			 * initializing new SCTP associations with sendmsg().
			 * The SCTP_INITMSG socket option uses this same data
			 * structure.  This structure is not used for
			 * recvmsg().
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ----------------------
			 * IPPROTO_SCTP  SCTP_INIT      struct sctp_initmsg
			 */
			if (cmsg->cmsg_len !=
			    CMSG_LEN(sizeof(struct sctp_initmsg)))
				return -EINVAL;
			cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg);
			break;

		case SCTP_SNDRCV:
			/* SCTP Socket API Extension
			 * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV)
			 *
			 * This cmsghdr structure specifies SCTP options for
			 * sendmsg() and describes SCTP header information
			 * about a received message through recvmsg().
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ----------------------
			 * IPPROTO_SCTP  SCTP_SNDRCV    struct sctp_sndrcvinfo
			 */
			if (cmsg->cmsg_len !=
			    CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
				return -EINVAL;

			cmsgs->info =
				(struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);

			/* Minimally, validate the sinfo_flags. */
			if (cmsgs->info->sinfo_flags &
			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
			      SCTP_ABORT | SCTP_EOF))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	}
	return 0;
}
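
/*
 * Illustrative userspace sketch (not part of this file): building the
 * SCTP_SNDRCV ancillary data that sctp_msghdr_parse() above validates.
 * Assumes the lksctp-tools headers; compile separately.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static ssize_t send_on_stream(int fd, const void *buf, size_t len,
 *				      uint16_t stream)
 *	{
 *		char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *		struct sctp_sndrcvinfo *sinfo;
 *		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
 *		struct msghdr msg;
 *		struct cmsghdr *cmsg;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		memset(cbuf, 0, sizeof(cbuf));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = IPPROTO_SCTP;
 *		cmsg->cmsg_type = SCTP_SNDRCV;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *		sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *		sinfo->sinfo_stream = stream;
 *
 *		return sendmsg(fd, &msg, 0);
 *	}
 */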
5973 * Wait for a packet..
5974 * Note: This function is the same function as in core/datagram.c
5975 * with a few modifications to make lksctp work.
5977 static int sctp_wait_for_packet(struct sock
* sk
, int *err
, long *timeo_p
)
5982 prepare_to_wait_exclusive(sk
->sk_sleep
, &wait
, TASK_INTERRUPTIBLE
);
5984 /* Socket errors? */
5985 error
= sock_error(sk
);
5989 if (!skb_queue_empty(&sk
->sk_receive_queue
))
5992 /* Socket shut down? */
5993 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
5996 /* Sequenced packets can come disconnected. If so we report the
6001 /* Is there a good reason to think that we may receive some data? */
6002 if (list_empty(&sctp_sk(sk
)->ep
->asocs
) && !sctp_sstate(sk
, LISTENING
))
6005 /* Handle signals. */
6006 if (signal_pending(current
))
6009 /* Let another process have a go. Since we are going to sleep
6010 * anyway. Note: This may cause odd behaviors if the message
6011 * does not fit in the user's buffer, but this seems to be the
6012 * only way to honor MSG_DONTWAIT realistically.
6014 sctp_release_sock(sk
);
6015 *timeo_p
= schedule_timeout(*timeo_p
);
6019 finish_wait(sk
->sk_sleep
, &wait
);
6023 error
= sock_intr_errno(*timeo_p
);
6026 finish_wait(sk
->sk_sleep
, &wait
);
6031 /* Receive a datagram.
6032 * Note: This is pretty much the same routine as in core/datagram.c
6033 * with a few changes to make lksctp work.
6035 static struct sk_buff
*sctp_skb_recv_datagram(struct sock
*sk
, int flags
,
6036 int noblock
, int *err
)
6039 struct sk_buff
*skb
;
6042 timeo
= sock_rcvtimeo(sk
, noblock
);
6044 SCTP_DEBUG_PRINTK("Timeout: timeo: %ld, MAX: %ld.\n",
6045 timeo
, MAX_SCHEDULE_TIMEOUT
);
6048 /* Again only user level code calls this function,
6049 * so nothing interrupt level
6050 * will suddenly eat the receive_queue.
6052 * Look at current nfs client by the way...
6053 * However, this function was corrent in any case. 8)
6055 if (flags
& MSG_PEEK
) {
6056 spin_lock_bh(&sk
->sk_receive_queue
.lock
);
6057 skb
= skb_peek(&sk
->sk_receive_queue
);
6059 atomic_inc(&skb
->users
);
6060 spin_unlock_bh(&sk
->sk_receive_queue
.lock
);
6062 skb
= skb_dequeue(&sk
->sk_receive_queue
);
6068 /* Caller is allowed not to check sk->sk_err before calling. */
6069 error
= sock_error(sk
);
6073 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
6076 /* User doesn't want to wait. */
6080 } while (sctp_wait_for_packet(sk
, err
, &timeo
) == 0);
6089 /* If sndbuf has changed, wake up per association sndbuf waiters. */
6090 static void __sctp_write_space(struct sctp_association
*asoc
)
6092 struct sock
*sk
= asoc
->base
.sk
;
6093 struct socket
*sock
= sk
->sk_socket
;
6095 if ((sctp_wspace(asoc
) > 0) && sock
) {
6096 if (waitqueue_active(&asoc
->wait
))
6097 wake_up_interruptible(&asoc
->wait
);
6099 if (sctp_writeable(sk
)) {
6100 if (sk
->sk_sleep
&& waitqueue_active(sk
->sk_sleep
))
6101 wake_up_interruptible(sk
->sk_sleep
);
6103 /* Note that we try to include the Async I/O support
6104 * here by modeling from the current TCP/UDP code.
6105 * We have not tested with it yet.
6107 if (sock
->fasync_list
&&
6108 !(sk
->sk_shutdown
& SEND_SHUTDOWN
))
6109 sock_wake_async(sock
,
6110 SOCK_WAKE_SPACE
, POLL_OUT
);
6115 /* Do accounting for the sndbuf space.
6116 * Decrement the used sndbuf space of the corresponding association by the
6117 * data size which was just transmitted(freed).
6119 static void sctp_wfree(struct sk_buff
*skb
)
6121 struct sctp_association
*asoc
;
6122 struct sctp_chunk
*chunk
;
6125 /* Get the saved chunk pointer. */
6126 chunk
= *((struct sctp_chunk
**)(skb
->cb
));
6129 asoc
->sndbuf_used
-= SCTP_DATA_SNDSIZE(chunk
) +
6130 sizeof(struct sk_buff
) +
6131 sizeof(struct sctp_chunk
);
6133 atomic_sub(sizeof(struct sctp_chunk
), &sk
->sk_wmem_alloc
);
6136 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
6138 sk
->sk_wmem_queued
-= skb
->truesize
;
6139 sk_mem_uncharge(sk
, skb
->truesize
);
6142 __sctp_write_space(asoc
);
6144 sctp_association_put(asoc
);
6147 /* Do accounting for the receive space on the socket.
6148 * Accounting for the association is done in ulpevent.c
6149 * We set this as a destructor for the cloned data skbs so that
6150 * accounting is done at the correct time.
6152 void sctp_sock_rfree(struct sk_buff
*skb
)
6154 struct sock
*sk
= skb
->sk
;
6155 struct sctp_ulpevent
*event
= sctp_skb2event(skb
);
6157 atomic_sub(event
->rmem_len
, &sk
->sk_rmem_alloc
);
6160 * Mimic the behavior of sock_rfree
6162 sk_mem_uncharge(sk
, event
->rmem_len
);
6166 /* Helper function to wait for space in the sndbuf. */
6167 static int sctp_wait_for_sndbuf(struct sctp_association
*asoc
, long *timeo_p
,
6170 struct sock
*sk
= asoc
->base
.sk
;
6172 long current_timeo
= *timeo_p
;
6175 SCTP_DEBUG_PRINTK("wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%zu\n",
6176 asoc
, (long)(*timeo_p
), msg_len
);
6178 /* Increment the association's refcnt. */
6179 sctp_association_hold(asoc
);
6181 /* Wait on the association specific sndbuf space. */
6183 prepare_to_wait_exclusive(&asoc
->wait
, &wait
,
6184 TASK_INTERRUPTIBLE
);
6187 if (sk
->sk_err
|| asoc
->state
>= SCTP_STATE_SHUTDOWN_PENDING
||
6190 if (signal_pending(current
))
6191 goto do_interrupted
;
6192 if (msg_len
<= sctp_wspace(asoc
))
6195 /* Let another process have a go. Since we are going
6198 sctp_release_sock(sk
);
6199 current_timeo
= schedule_timeout(current_timeo
);
6200 BUG_ON(sk
!= asoc
->base
.sk
);
6203 *timeo_p
= current_timeo
;
6207 finish_wait(&asoc
->wait
, &wait
);
6209 /* Release the association's refcnt. */
6210 sctp_association_put(asoc
);
6219 err
= sock_intr_errno(*timeo_p
);
/* If socket sndbuf has changed, wake up all per association waiters.  */
void sctp_write_space(struct sock *sk)
{
	struct sctp_association *asoc;

	/* Wake up the tasks in each wait queue.  */
	list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
		__sctp_write_space(asoc);
	}
}

/* Is there any sndbuf space available on the socket?
 *
 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
 * associations on the same socket.  For a UDP-style socket with
 * multiple associations, it is possible for it to be "unwriteable"
 * prematurely.  I assume that this is acceptable because
 * a premature "unwriteable" is better than an accidental "writeable" which
 * would cause an unwanted block under certain circumstances.  For the 1-1
 * UDP-style sockets or TCP-style sockets, this code should work.
 */
static int sctp_writeable(struct sock *sk)
{
	int amt = 0;

	amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
	if (amt < 0)
		amt = 0;
	return amt;
}
6259 /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
6260 * returns immediately with EINPROGRESS.
6262 static int sctp_wait_for_connect(struct sctp_association
*asoc
, long *timeo_p
)
6264 struct sock
*sk
= asoc
->base
.sk
;
6266 long current_timeo
= *timeo_p
;
6269 SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __func__
, asoc
,
6272 /* Increment the association's refcnt. */
6273 sctp_association_hold(asoc
);
6276 prepare_to_wait_exclusive(&asoc
->wait
, &wait
,
6277 TASK_INTERRUPTIBLE
);
6280 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
6282 if (sk
->sk_err
|| asoc
->state
>= SCTP_STATE_SHUTDOWN_PENDING
||
6285 if (signal_pending(current
))
6286 goto do_interrupted
;
6288 if (sctp_state(asoc
, ESTABLISHED
))
6291 /* Let another process have a go. Since we are going
6294 sctp_release_sock(sk
);
6295 current_timeo
= schedule_timeout(current_timeo
);
6298 *timeo_p
= current_timeo
;
6302 finish_wait(&asoc
->wait
, &wait
);
6304 /* Release the association's refcnt. */
6305 sctp_association_put(asoc
);
6310 if (asoc
->init_err_counter
+ 1 > asoc
->max_init_attempts
)
6313 err
= -ECONNREFUSED
;
6317 err
= sock_intr_errno(*timeo_p
);
6325 static int sctp_wait_for_accept(struct sock
*sk
, long timeo
)
6327 struct sctp_endpoint
*ep
;
6331 ep
= sctp_sk(sk
)->ep
;
6335 prepare_to_wait_exclusive(sk
->sk_sleep
, &wait
,
6336 TASK_INTERRUPTIBLE
);
6338 if (list_empty(&ep
->asocs
)) {
6339 sctp_release_sock(sk
);
6340 timeo
= schedule_timeout(timeo
);
6345 if (!sctp_sstate(sk
, LISTENING
))
6349 if (!list_empty(&ep
->asocs
))
6352 err
= sock_intr_errno(timeo
);
6353 if (signal_pending(current
))
6361 finish_wait(sk
->sk_sleep
, &wait
);
static void sctp_wait_for_close(struct sock *sk, long timeout)
{
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&sctp_sk(sk)->ep->asocs))
			break;
		sctp_release_sock(sk);
		timeout = schedule_timeout(timeout);
		sctp_lock_sock(sk);
	} while (!signal_pending(current) && timeout);

	finish_wait(sk->sk_sleep, &wait);
}
6382 static void sctp_sock_rfree_frag(struct sk_buff
*skb
)
6384 struct sk_buff
*frag
;
6389 /* Don't forget the fragments. */
6390 for (frag
= skb_shinfo(skb
)->frag_list
; frag
; frag
= frag
->next
)
6391 sctp_sock_rfree_frag(frag
);
6394 sctp_sock_rfree(skb
);
6397 static void sctp_skb_set_owner_r_frag(struct sk_buff
*skb
, struct sock
*sk
)
6399 struct sk_buff
*frag
;
6404 /* Don't forget the fragments. */
6405 for (frag
= skb_shinfo(skb
)->frag_list
; frag
; frag
= frag
->next
)
6406 sctp_skb_set_owner_r_frag(frag
, sk
);
6409 sctp_skb_set_owner_r(skb
, sk
);
6412 /* Populate the fields of the newsk from the oldsk and migrate the assoc
6413 * and its messages to the newsk.
6415 static void sctp_sock_migrate(struct sock
*oldsk
, struct sock
*newsk
,
6416 struct sctp_association
*assoc
,
6417 sctp_socket_type_t type
)
6419 struct sctp_sock
*oldsp
= sctp_sk(oldsk
);
6420 struct sctp_sock
*newsp
= sctp_sk(newsk
);
6421 struct sctp_bind_bucket
*pp
; /* hash list port iterator */
6422 struct sctp_endpoint
*newep
= newsp
->ep
;
6423 struct sk_buff
*skb
, *tmp
;
6424 struct sctp_ulpevent
*event
;
6425 struct sctp_bind_hashbucket
*head
;
6427 /* Migrate socket buffer sizes and all the socket level options to the
6430 newsk
->sk_sndbuf
= oldsk
->sk_sndbuf
;
6431 newsk
->sk_rcvbuf
= oldsk
->sk_rcvbuf
;
6432 /* Brute force copy old sctp opt. */
6433 inet_sk_copy_descendant(newsk
, oldsk
);
6435 /* Restore the ep value that was overwritten with the above structure
6441 /* Hook this new socket in to the bind_hash list. */
6442 head
= &sctp_port_hashtable
[sctp_phashfn(inet_sk(oldsk
)->num
)];
6443 sctp_local_bh_disable();
6444 sctp_spin_lock(&head
->lock
);
6445 pp
= sctp_sk(oldsk
)->bind_hash
;
6446 sk_add_bind_node(newsk
, &pp
->owner
);
6447 sctp_sk(newsk
)->bind_hash
= pp
;
6448 inet_sk(newsk
)->num
= inet_sk(oldsk
)->num
;
6449 sctp_spin_unlock(&head
->lock
);
6450 sctp_local_bh_enable();
6452 /* Copy the bind_addr list from the original endpoint to the new
6453 * endpoint so that we can handle restarts properly
6455 sctp_bind_addr_dup(&newsp
->ep
->base
.bind_addr
,
6456 &oldsp
->ep
->base
.bind_addr
, GFP_KERNEL
);
6458 /* Move any messages in the old socket's receive queue that are for the
6459 * peeled off association to the new socket's receive queue.
6461 sctp_skb_for_each(skb
, &oldsk
->sk_receive_queue
, tmp
) {
6462 event
= sctp_skb2event(skb
);
6463 if (event
->asoc
== assoc
) {
6464 sctp_sock_rfree_frag(skb
);
6465 __skb_unlink(skb
, &oldsk
->sk_receive_queue
);
6466 __skb_queue_tail(&newsk
->sk_receive_queue
, skb
);
6467 sctp_skb_set_owner_r_frag(skb
, newsk
);
6471 /* Clean up any messages pending delivery due to partial
6472 * delivery. Three cases:
6473 * 1) No partial deliver; no work.
6474 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
6475 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
6477 skb_queue_head_init(&newsp
->pd_lobby
);
6478 atomic_set(&sctp_sk(newsk
)->pd_mode
, assoc
->ulpq
.pd_mode
);
6480 if (atomic_read(&sctp_sk(oldsk
)->pd_mode
)) {
6481 struct sk_buff_head
*queue
;
6483 /* Decide which queue to move pd_lobby skbs to. */
6484 if (assoc
->ulpq
.pd_mode
) {
6485 queue
= &newsp
->pd_lobby
;
6487 queue
= &newsk
->sk_receive_queue
;
6489 /* Walk through the pd_lobby, looking for skbs that
6490 * need moved to the new socket.
6492 sctp_skb_for_each(skb
, &oldsp
->pd_lobby
, tmp
) {
6493 event
= sctp_skb2event(skb
);
6494 if (event
->asoc
== assoc
) {
6495 sctp_sock_rfree_frag(skb
);
6496 __skb_unlink(skb
, &oldsp
->pd_lobby
);
6497 __skb_queue_tail(queue
, skb
);
6498 sctp_skb_set_owner_r_frag(skb
, newsk
);
6502 /* Clear up any skbs waiting for the partial
6503 * delivery to finish.
6505 if (assoc
->ulpq
.pd_mode
)
6506 sctp_clear_pd(oldsk
, NULL
);
6510 sctp_skb_for_each(skb
, &assoc
->ulpq
.reasm
, tmp
) {
6511 sctp_sock_rfree_frag(skb
);
6512 sctp_skb_set_owner_r_frag(skb
, newsk
);
6515 sctp_skb_for_each(skb
, &assoc
->ulpq
.lobby
, tmp
) {
6516 sctp_sock_rfree_frag(skb
);
6517 sctp_skb_set_owner_r_frag(skb
, newsk
);
6520 /* Set the type of socket to indicate that it is peeled off from the
6521 * original UDP-style socket or created with the accept() call on a
6522 * TCP-style socket..
6526 /* Mark the new socket "in-use" by the user so that any packets
6527 * that may arrive on the association after we've moved it are
6528 * queued to the backlog. This prevents a potential race between
6529 * backlog processing on the old socket and new-packet processing
6530 * on the new socket.
6532 * The caller has just allocated newsk so we can guarantee that other
6533 * paths won't try to lock it and then oldsk.
6535 lock_sock_nested(newsk
, SINGLE_DEPTH_NESTING
);
6536 sctp_assoc_migrate(assoc
, newsk
);
6538 /* If the association on the newsk is already closed before accept()
6539 * is called, set RCV_SHUTDOWN flag.
6541 if (sctp_state(assoc
, CLOSED
) && sctp_style(newsk
, TCP
))
6542 newsk
->sk_shutdown
|= RCV_SHUTDOWN
;
6544 newsk
->sk_state
= SCTP_SS_ESTABLISHED
;
6545 sctp_release_sock(newsk
);
/* This proto struct describes the ULP interface for SCTP.  */
struct proto sctp_prot = {
	.name		= "SCTP",
	.owner		= THIS_MODULE,
	.close		= sctp_close,
	.connect	= sctp_connect,
	.disconnect	= sctp_disconnect,
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
	.destroy	= sctp_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
	.sendmsg	= sctp_sendmsg,
	.recvmsg	= sctp_recvmsg,
	.bind		= sctp_bind,
	.backlog_rcv	= sctp_backlog_rcv,
	.hash		= sctp_hash,
	.unhash		= sctp_unhash,
	.get_port	= sctp_get_port,
	.obj_size	= sizeof(struct sctp_sock),
	.sysctl_mem	= sysctl_sctp_mem,
	.sysctl_rmem	= sysctl_sctp_rmem,
	.sysctl_wmem	= sysctl_sctp_wmem,
	.memory_pressure = &sctp_memory_pressure,
	.enter_memory_pressure = sctp_enter_memory_pressure,
	.memory_allocated = &sctp_memory_allocated,
	.sockets_allocated = &sctp_sockets_allocated,
};

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)

struct proto sctpv6_prot = {
	.name		= "SCTPv6",
	.owner		= THIS_MODULE,
	.close		= sctp_close,
	.connect	= sctp_connect,
	.disconnect	= sctp_disconnect,
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
	.destroy	= sctp_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
	.sendmsg	= sctp_sendmsg,
	.recvmsg	= sctp_recvmsg,
	.bind		= sctp_bind,
	.backlog_rcv	= sctp_backlog_rcv,
	.hash		= sctp_hash,
	.unhash		= sctp_unhash,
	.get_port	= sctp_get_port,
	.obj_size	= sizeof(struct sctp6_sock),
	.sysctl_mem	= sysctl_sctp_mem,
	.sysctl_rmem	= sysctl_sctp_rmem,
	.sysctl_wmem	= sysctl_sctp_wmem,
	.memory_pressure = &sctp_memory_pressure,
	.enter_memory_pressure = sctp_enter_memory_pressure,
	.memory_allocated = &sctp_memory_allocated,
	.sockets_allocated = &sctp_sockets_allocated,
};
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */