/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * Initialization/cleanup for SCTP protocol support.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/sctp/sctp.h>
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_ecn.h>

#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)

/* Global data structures. */
struct sctp_globals sctp_globals __read_mostly;

struct idr sctp_assocs_id;
DEFINE_SPINLOCK(sctp_assocs_id_lock);

static struct sctp_pf *sctp_pf_inet6_specific;
static struct sctp_pf *sctp_pf_inet_specific;
static struct sctp_af *sctp_af_v4_specific;
static struct sctp_af *sctp_af_v6_specific;

struct kmem_cache *sctp_chunk_cachep __read_mostly;
struct kmem_cache *sctp_bucket_cachep __read_mostly;

long sysctl_sctp_mem[3];
int sysctl_sctp_rmem[3];
int sysctl_sctp_wmem[3];
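
/* The triples above back the net.sctp.sctp_mem, sctp_rmem and sctp_wmem
 * sysctls: sctp_mem holds the protocol-wide {low, pressure, high} limits
 * in pages, while sctp_rmem and sctp_wmem hold per-socket {min, default,
 * max} limits in bytes. Their defaults are computed in sctp_init() below.
 */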
/* Private helper to extract ipv4 addresses and stash them in
 * the protocol structure.
 */
static void sctp_v4_copy_addrlist(struct list_head *addrlist,
				  struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	struct sctp_sockaddr_entry *addr;

	rcu_read_lock();
	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
		rcu_read_unlock();
		return;
	}

	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
		/* Add the address to the local list.  */
		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
		if (addr) {
			addr->a.v4.sin_family = AF_INET;
			addr->a.v4.sin_port = 0;
			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
			INIT_LIST_HEAD(&addr->list);
			list_add_tail(&addr->list, addrlist);
		}
	}

	rcu_read_unlock();
}

/* Extract our IP addresses from the system and stash them in the
 * protocol structure.
 */
static void sctp_get_local_addr_list(struct net *net)
{
	struct net_device *dev;
	struct list_head *pos;
	struct sctp_af *af;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		list_for_each(pos, &sctp_address_families) {
			af = list_entry(pos, struct sctp_af, list);
			af->copy_addrlist(&net->sctp.local_addr_list, dev);
		}
	}
	rcu_read_unlock();
}

/* Free the existing local addresses.  */
static void sctp_free_local_addr_list(struct net *net)
{
	struct sctp_sockaddr_entry *addr;
	struct list_head *pos, *temp;

	list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
		list_del(pos);
		kfree(addr);
	}
}
/* Copy the local addresses which are valid for 'scope' into 'bp'.  */
int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
			      enum sctp_scope scope, gfp_t gfp, int copy_flags)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr laddr;
	int error = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
		if (!addr->valid)
			continue;
		if (!sctp_in_scope(net, &addr->a, scope))
			continue;

		/* Now that the address is in scope, check to see if
		 * the address type is really supported by the local
		 * sock as well as the remote peer.
		 */
		if (addr->a.sa.sa_family == AF_INET &&
		    !(copy_flags & SCTP_ADDR4_PEERSUPP))
			continue;
		if (addr->a.sa.sa_family == AF_INET6 &&
		    (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
		     !(copy_flags & SCTP_ADDR6_PEERSUPP)))
			continue;

		laddr = addr->a;
		/* also works for setting ipv6 address port */
		laddr.v4.sin_port = htons(bp->port);
		if (sctp_bind_addr_state(bp, &laddr) != -1)
			continue;

		error = sctp_add_bind_addr(bp, &addr->a, sizeof(addr->a),
					   SCTP_ADDR_SRC, GFP_ATOMIC);
		if (error)
			break;
	}

	rcu_read_unlock();
	return error;
}

/* Copy over any ip options */
static void sctp_v4_copy_ip_options(struct sock *sk, struct sock *newsk)
{
	struct inet_sock *newinet, *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt, *newopt = NULL;

	newinet = inet_sk(newsk);

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt) {
		newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
				      inet_opt->opt.optlen, GFP_ATOMIC);
		if (newopt)
			memcpy(newopt, inet_opt, sizeof(*inet_opt) +
			       inet_opt->opt.optlen);
		else
			pr_err("%s: Failed to copy ip options\n", __func__);
	}
	RCU_INIT_POINTER(newinet->inet_opt, newopt);
	rcu_read_unlock();
}

/* Account for the IP options */
static int sctp_v4_ip_options_len(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt;
	int len = 0;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt)
		len = inet_opt->opt.optlen;
	rcu_read_unlock();

	return len;
}
/* Initialize a sctp_addr from an incoming skb. */
static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
			     int is_saddr)
{
	/* Always called on head skb, so this is safe */
	struct sctphdr *sh = sctp_hdr(skb);
	struct sockaddr_in *sa = &addr->v4;

	addr->v4.sin_family = AF_INET;

	if (is_saddr) {
		sa->sin_port = sh->source;
		sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
	} else {
		sa->sin_port = sh->dest;
		sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
	}
}

/* Initialize an sctp_addr from a socket. */
static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_port = 0;
	addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
}

/* Initialize sk->sk_rcv_saddr from sctp_addr. */
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr;
}

/* Initialize sk->sk_daddr from sctp_addr. */
static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr;
}

/* Initialize a sctp_addr from an address parameter. */
static void sctp_v4_from_addr_param(union sctp_addr *addr,
				    union sctp_addr_param *param,
				    __be16 port, int iif)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_port = port;
	addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
}

/* Initialize an address parameter from a sctp_addr and return the length
 * of the address parameter.
 */
static int sctp_v4_to_addr_param(const union sctp_addr *addr,
				 union sctp_addr_param *param)
{
	int length = sizeof(struct sctp_ipv4addr_param);

	param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS;
	param->v4.param_hdr.length = htons(length);
	param->v4.addr.s_addr = addr->v4.sin_addr.s_addr;

	return length;
}
/* Initialize a sctp_addr from a dst_entry. */
static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
			      __be16 port)
{
	saddr->v4.sin_family = AF_INET;
	saddr->v4.sin_port = port;
	saddr->v4.sin_addr.s_addr = fl4->saddr;
}

/* Compare two addresses exactly. */
static int sctp_v4_cmp_addr(const union sctp_addr *addr1,
			    const union sctp_addr *addr2)
{
	if (addr1->sa.sa_family != addr2->sa.sa_family)
		return 0;
	if (addr1->v4.sin_port != addr2->v4.sin_port)
		return 0;
	if (addr1->v4.sin_addr.s_addr != addr2->v4.sin_addr.s_addr)
		return 0;

	return 1;
}

/* Initialize addr struct to INADDR_ANY. */
static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
	addr->v4.sin_port = port;
}

/* Is this a wildcard address? */
static int sctp_v4_is_any(const union sctp_addr *addr)
{
	return htonl(INADDR_ANY) == addr->v4.sin_addr.s_addr;
}
/* This function checks if the address is a valid address to be used for
 * SCTP binding.
 *
 * Output:
 * Return 0 - If the address is a non-unicast or an illegal address.
 * Return 1 - If the address is a unicast.
 */
static int sctp_v4_addr_valid(union sctp_addr *addr,
			      struct sctp_sock *sp,
			      const struct sk_buff *skb)
{
	/* IPv4 addresses not allowed */
	if (sp && ipv6_only_sock(sctp_opt2sk(sp)))
		return 0;

	/* Is this a non-unicast address or an unusable SCTP address? */
	if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr))
		return 0;

	/* Is this a broadcast address? */
	if (skb && skb_rtable(skb)->rt_flags & RTCF_BROADCAST)
		return 0;

	return 1;
}

/* Should this be available for binding?   */
static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
{
	struct net *net = sock_net(&sp->inet.sk);
	int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr);

	if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
	    ret != RTN_LOCAL &&
	    !sp->inet.freebind &&
	    !net->ipv4.sysctl_ip_nonlocal_bind)
		return 0;

	if (ipv6_only_sock(sctp_opt2sk(sp)))
		return 0;

	return 1;
}
/* Checking the loopback, private and other address scopes as defined in
 * RFC 1918.  The IPv4 scoping is based on the draft for SCTP IPv4
 * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
 *
 * Level 0 - unusable SCTP addresses
 * Level 1 - loopback address
 * Level 2 - link-local addresses
 * Level 3 - private addresses.
 * Level 4 - global addresses
 * For INIT and INIT-ACK address list, let L be the level of
 * requested destination address, sender and receiver
 * SHOULD include all of its addresses with level greater
 * than or equal to L.
 *
 * IPv4 scoping can be controlled through sysctl option
 * net.sctp.addr_scope_policy
 */
static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
{
	enum sctp_scope retval;

	/* Check for unusable SCTP addresses. */
	if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_UNUSABLE;
	} else if (ipv4_is_loopback(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_LOOPBACK;
	} else if (ipv4_is_linklocal_169(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_LINK;
	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_PRIVATE;
	} else {
		retval = SCTP_SCOPE_GLOBAL;
	}

	return retval;
}
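
/* Illustrative mapping, derived from the checks above: 127.0.0.1 maps to
 * SCTP_SCOPE_LOOPBACK, 169.254.1.1 to SCTP_SCOPE_LINK, 10.0.0.1, 172.16.0.1
 * and 192.168.0.1 to SCTP_SCOPE_PRIVATE, and any other usable unicast
 * address (e.g. 192.0.2.1) to SCTP_SCOPE_GLOBAL.
 */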
/* Returns a valid dst cache entry for the given source and destination ip
 * addresses. If an association is passed, tries to get a dst entry with a
 * source address that matches an address in the bind address list.
 */
static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
			    struct flowi *fl, struct sock *sk)
{
	struct sctp_association *asoc = t->asoc;
	struct rtable *rt;
	struct flowi4 *fl4 = &fl->u.ip4;
	struct sctp_bind_addr *bp;
	struct sctp_sockaddr_entry *laddr;
	struct dst_entry *dst = NULL;
	union sctp_addr *daddr = &t->ipaddr;
	union sctp_addr dst_saddr;

	memset(fl4, 0x0, sizeof(struct flowi4));
	fl4->daddr = daddr->v4.sin_addr.s_addr;
	fl4->fl4_dport = daddr->v4.sin_port;
	fl4->flowi4_proto = IPPROTO_SCTP;
	if (asoc) {
		fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk);
		fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
		fl4->fl4_sport = htons(asoc->base.bind_addr.port);
	}
	if (saddr) {
		fl4->saddr = saddr->v4.sin_addr.s_addr;
		fl4->fl4_sport = saddr->v4.sin_port;
	}

	pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
		 &fl4->saddr);

	rt = ip_route_output_key(sock_net(sk), fl4);
	if (!IS_ERR(rt))
		dst = &rt->dst;

	/* If there is no association or if a source address is passed, no
	 * more validation is required.
	 */
	if (!asoc || saddr)
		goto out;

	bp = &asoc->base.bind_addr;

	if (dst) {
		/* Walk through the bind address list and look for a bind
		 * address that matches the source address of the returned dst.
		 */
		sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
		rcu_read_lock();
		list_for_each_entry_rcu(laddr, &bp->address_list, list) {
			if (!laddr->valid || (laddr->state == SCTP_ADDR_DEL) ||
			    (laddr->state != SCTP_ADDR_SRC &&
			     !asoc->src_out_of_asoc_ok))
				continue;
			if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
				goto out_unlock;
		}
		rcu_read_unlock();

		/* None of the bound addresses match the source address of the
		 * dst. So release it.
		 */
		dst_release(dst);
		dst = NULL;
	}

	/* Walk through the bind address list and try to get a dst that
	 * matches a bind address as the source address.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
		struct net_device *odev;

		if (!laddr->valid)
			continue;
		if (laddr->state != SCTP_ADDR_SRC ||
		    AF_INET != laddr->a.sa.sa_family)
			continue;

		fl4->fl4_sport = laddr->a.v4.sin_port;
		flowi4_update_output(fl4,
				     asoc->base.sk->sk_bound_dev_if,
				     RT_CONN_FLAGS(asoc->base.sk),
				     daddr->v4.sin_addr.s_addr,
				     laddr->a.v4.sin_addr.s_addr);

		rt = ip_route_output_key(sock_net(sk), fl4);
		if (IS_ERR(rt))
			continue;

		/* Ensure the src address belongs to the output
		 * interface.
		 */
		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
				     false);
		if (!odev || odev->ifindex != fl4->flowi4_oif) {
			if (!dst)
				dst = &rt->dst;
			else
				dst_release(&rt->dst);
			continue;
		}

		dst_release(dst);
		dst = &rt->dst;
		break;
	}

out_unlock:
	rcu_read_unlock();
out:
	t->dst = dst;
	if (dst) {
		pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
			 &fl4->daddr, &fl4->saddr);
	} else {
		t->dst = NULL;
		pr_debug("no route\n");
	}
}
/* For v4, the source address is cached in the route entry(dst). So no need
 * to cache it separately and hence this is an empty routine.
 */
static void sctp_v4_get_saddr(struct sctp_sock *sk,
			      struct sctp_transport *t,
			      struct flowi *fl)
{
	union sctp_addr *saddr = &t->saddr;
	struct rtable *rt = (struct rtable *)t->dst;

	if (rt) {
		saddr->v4.sin_family = AF_INET;
		saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr;
	}
}

/* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
	return inet_iif(skb);
}

/* Was this packet marked by Explicit Congestion Notification? */
static int sctp_v4_is_ce(const struct sk_buff *skb)
{
	return INET_ECN_is_ce(ip_hdr(skb)->tos);
}
/* Create and initialize a new sk for the socket returned by accept(). */
static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
					     struct sctp_association *asoc,
					     bool kern)
{
	struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
				      sk->sk_prot, kern);
	struct inet_sock *newinet;

	if (!newsk)
		goto out;

	sock_init_data(NULL, newsk);

	sctp_copy_sock(newsk, sk, asoc);
	sock_reset_flag(newsk, SOCK_ZAPPED);

	sctp_v4_copy_ip_options(sk, newsk);

	newinet = inet_sk(newsk);

	newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;

	sk_refcnt_debug_inc(newsk);

	if (newsk->sk_prot->init(newsk)) {
		sk_common_release(newsk);
		newsk = NULL;
	}

out:
	return newsk;
}

static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
{
	/* No address mapping for V4 sockets */
	return sizeof(struct sockaddr_in);
}

/* Dump the v4 addr to the seq file. */
static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
{
	seq_printf(seq, "%pI4 ", &addr->v4.sin_addr);
}
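
/* Mark the socket as ECN capable so outgoing packets are sent with ECT set. */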
static void sctp_v4_ecn_capable(struct sock *sk)
{
	INET_ECN_xmit(sk);
}
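
/* Periodic worker for the address event wait queue: for every queued local
 * address change, issue ASCONF updates on each auto-asconf socket, and
 * re-arm the timer for IPv6 addresses that are still completing DAD.
 */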
static void sctp_addr_wq_timeout_handler(struct timer_list *t)
{
	struct net *net = from_timer(net, t, sctp.addr_wq_timer);
	struct sctp_sockaddr_entry *addrw, *temp;
	struct sctp_sock *sp;

	spin_lock_bh(&net->sctp.addr_wq_lock);

	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
		pr_debug("%s: the first ent in wq:%p is addr:%pISc for cmd:%d at "
			 "entry:%p\n", __func__, &net->sctp.addr_waitq, &addrw->a.sa,
			 addrw->state, addrw);

#if IS_ENABLED(CONFIG_IPV6)
		/* Now we send an ASCONF for each association */
		/* Note. we currently don't handle link local IPv6 addresses */
		if (addrw->a.sa.sa_family == AF_INET6) {
			struct in6_addr *in6;

			if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
			    IPV6_ADDR_LINKLOCAL)
				goto free_next;

			in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
			if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
			    addrw->state == SCTP_ADDR_NEW) {
				unsigned long timeo_val;

				pr_debug("%s: this is on DAD, trying %d sec later\n",
					 __func__, SCTP_ADDRESS_TICK_DELAY);

				timeo_val = jiffies;
				timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
				mod_timer(&net->sctp.addr_wq_timer, timeo_val);
				break;
			}
		}
#endif
		list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
			struct sock *sk;

			sk = sctp_opt2sk(sp);
			/* ignore bound-specific endpoints */
			if (!sctp_is_ep_boundall(sk))
				continue;
			bh_lock_sock(sk);
			if (sctp_asconf_mgmt(sp, addrw) < 0)
				pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
			bh_unlock_sock(sk);
		}
#if IS_ENABLED(CONFIG_IPV6)
free_next:
#endif
		list_del(&addrw->list);
		kfree(addrw);
	}
	spin_unlock_bh(&net->sctp.addr_wq_lock);
}
static void sctp_free_addr_wq(struct net *net)
{
	struct sctp_sockaddr_entry *addrw;
	struct sctp_sockaddr_entry *temp;

	spin_lock_bh(&net->sctp.addr_wq_lock);
	del_timer(&net->sctp.addr_wq_timer);
	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
		list_del(&addrw->list);
		kfree(addrw);
	}
	spin_unlock_bh(&net->sctp.addr_wq_lock);
}

/* lookup the entry for the same address in the addr_waitq
 * sctp_addr_wq MUST be locked
 */
static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
					struct sctp_sockaddr_entry *addr)
{
	struct sctp_sockaddr_entry *addrw;

	list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
		if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
			continue;
		if (addrw->a.sa.sa_family == AF_INET) {
			if (addrw->a.v4.sin_addr.s_addr ==
			    addr->a.v4.sin_addr.s_addr)
				return addrw;
		} else if (addrw->a.sa.sa_family == AF_INET6) {
			if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
					    &addr->a.v6.sin6_addr))
				return addrw;
		}
	}
	return NULL;
}
void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
{
	struct sctp_sockaddr_entry *addrw;
	unsigned long timeo_val;

	/* first, we check if an opposite message already exists in the queue.
	 * If we found such message, it is removed.
	 * This operation is a bit stupid, but the DHCP client attaches the
	 * new address after a couple of additions and deletions of that same
	 * address.
	 */

	spin_lock_bh(&net->sctp.addr_wq_lock);
	/* Offsets existing events in addr_wq */
	addrw = sctp_addr_wq_lookup(net, addr);
	if (addrw) {
		if (addrw->state != cmd) {
			pr_debug("%s: offsets existing entry for %d, addr:%pISc "
				 "in wq:%p\n", __func__, addrw->state, &addrw->a.sa,
				 &net->sctp.addr_waitq);

			list_del(&addrw->list);
			kfree(addrw);
		}
		spin_unlock_bh(&net->sctp.addr_wq_lock);
		return;
	}

	/* OK, we have to add the new address to the wait queue */
	addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
	if (!addrw) {
		spin_unlock_bh(&net->sctp.addr_wq_lock);
		return;
	}
	addrw->state = cmd;
	list_add_tail(&addrw->list, &net->sctp.addr_waitq);

	pr_debug("%s: add new entry for cmd:%d, addr:%pISc in wq:%p\n",
		 __func__, addrw->state, &addrw->a.sa, &net->sctp.addr_waitq);

	if (!timer_pending(&net->sctp.addr_wq_timer)) {
		timeo_val = jiffies;
		timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
		mod_timer(&net->sctp.addr_wq_timer, timeo_val);
	}
	spin_unlock_bh(&net->sctp.addr_wq_lock);
}
/* Event handler for inet address addition/deletion events.
 * The sctp_local_addr_list needs to be protected by a spin lock since
 * multiple notifiers (say IPv4 and IPv6) may be running at the same
 * time and thus corrupt the list.
 * The reader side is protected with RCU.
 */
static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
			       void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct sctp_sockaddr_entry *addr = NULL;
	struct sctp_sockaddr_entry *temp;
	struct net *net = dev_net(ifa->ifa_dev->dev);
	int found = 0;

	switch (ev) {
	case NETDEV_UP:
		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
		if (addr) {
			addr->a.v4.sin_family = AF_INET;
			addr->a.v4.sin_port = 0;
			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
			addr->valid = 1;
			spin_lock_bh(&net->sctp.local_addr_lock);
			list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
			sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
			spin_unlock_bh(&net->sctp.local_addr_lock);
		}
		break;
	case NETDEV_DOWN:
		spin_lock_bh(&net->sctp.local_addr_lock);
		list_for_each_entry_safe(addr, temp,
					&net->sctp.local_addr_list, list) {
			if (addr->a.sa.sa_family == AF_INET &&
			    addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
				found = 1;
				addr->valid = 0;
				list_del_rcu(&addr->list);
				break;
			}
		}
		spin_unlock_bh(&net->sctp.local_addr_lock);
		if (found)
			kfree_rcu(addr, rcu);
		break;
	}

	return NOTIFY_DONE;
}
/* Initialize the control inode/socket with a control endpoint data
 * structure.  This endpoint is reserved exclusively for the OOTB processing.
 */
static int sctp_ctl_sock_init(struct net *net)
{
	int err;
	sa_family_t family = PF_INET;

	if (sctp_get_pf_specific(PF_INET6))
		family = PF_INET6;

	err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
				   SOCK_SEQPACKET, IPPROTO_SCTP, net);

	/* If IPv6 socket could not be created, try the IPv4 socket */
	if (err < 0 && family == PF_INET6)
		err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
					   SOCK_SEQPACKET, IPPROTO_SCTP,
					   net);

	if (err < 0) {
		pr_err("Failed to create the SCTP control socket\n");
		return err;
	}
	return 0;
}
/* Register address family specific functions. */
int sctp_register_af(struct sctp_af *af)
{
	switch (af->sa_family) {
	case AF_INET:
		if (sctp_af_v4_specific)
			return 0;
		sctp_af_v4_specific = af;
		break;
	case AF_INET6:
		if (sctp_af_v6_specific)
			return 0;
		sctp_af_v6_specific = af;
		break;
	default:
		return 0;
	}

	INIT_LIST_HEAD(&af->list);
	list_add_tail(&af->list, &sctp_address_families);
	return 1;
}

/* Get the table of functions for manipulating a particular address
 * family.
 */
struct sctp_af *sctp_get_af_specific(sa_family_t family)
{
	switch (family) {
	case AF_INET:
		return sctp_af_v4_specific;
	case AF_INET6:
		return sctp_af_v6_specific;
	default:
		return NULL;
	}
}
/* Common code to initialize an AF_INET msg_name. */
static void sctp_inet_msgname(char *msgname, int *addr_len)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *)msgname;
	*addr_len = sizeof(struct sockaddr_in);
	sin->sin_family = AF_INET;
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
}

/* Copy the primary address of the peer primary address as the msg_name. */
static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
				    int *addr_len)
{
	struct sockaddr_in *sin, *sinfrom;

	if (msgname) {
		struct sctp_association *asoc;

		asoc = event->asoc;
		sctp_inet_msgname(msgname, addr_len);
		sin = (struct sockaddr_in *)msgname;
		sinfrom = &asoc->peer.primary_addr.v4;
		sin->sin_port = htons(asoc->peer.port);
		sin->sin_addr.s_addr = sinfrom->sin_addr.s_addr;
	}
}

/* Initialize and copy out a msgname from an inbound skb. */
static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
{
	if (msgname) {
		struct sctphdr *sh = sctp_hdr(skb);
		struct sockaddr_in *sin = (struct sockaddr_in *)msgname;

		sctp_inet_msgname(msgname, len);
		sin->sin_port = sh->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
	}
}

/* Do we support this AF? */
static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp)
{
	/* PF_INET only supports AF_INET addresses. */
	return AF_INET == family;
}

/* Address matching with wildcards allowed. */
static int sctp_inet_cmp_addr(const union sctp_addr *addr1,
			      const union sctp_addr *addr2,
			      struct sctp_sock *opt)
{
	/* PF_INET only supports AF_INET addresses. */
	if (addr1->sa.sa_family != addr2->sa.sa_family)
		return 0;
	if (htonl(INADDR_ANY) == addr1->v4.sin_addr.s_addr ||
	    htonl(INADDR_ANY) == addr2->v4.sin_addr.s_addr)
		return 1;
	if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr)
		return 1;

	return 0;
}

/* Verify that provided sockaddr looks bindable.  Common verification has
 * already been taken care of.
 */
static int sctp_inet_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
{
	return sctp_v4_available(addr, opt);
}

/* Verify that sockaddr looks sendable.  Common verification has already
 * been taken care of.
 */
static int sctp_inet_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
{
	return 1;
}

/* Fill in Supported Address Type information for INIT and INIT-ACK
 * chunks.  Returns number of addresses supported.
 */
static int sctp_inet_supported_addrs(const struct sctp_sock *opt,
				     __be16 *types)
{
	types[0] = SCTP_PARAM_IPV4_ADDRESS;
	return 1;
}
/* Wrapper routine that calls the ip transmit routine. */
static inline int sctp_v4_xmit(struct sk_buff *skb,
			       struct sctp_transport *transport)
{
	struct inet_sock *inet = inet_sk(skb->sk);

	pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb,
		 skb->len, &transport->fl.u.ip4.saddr, &transport->fl.u.ip4.daddr);

	inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
			 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;

	SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);

	return ip_queue_xmit(&inet->sk, skb, &transport->fl);
}
static struct sctp_af sctp_af_inet;

static struct sctp_pf sctp_pf_inet = {
	.event_msgname	  = sctp_inet_event_msgname,
	.skb_msgname	  = sctp_inet_skb_msgname,
	.af_supported	  = sctp_inet_af_supported,
	.cmp_addr	  = sctp_inet_cmp_addr,
	.bind_verify	  = sctp_inet_bind_verify,
	.send_verify	  = sctp_inet_send_verify,
	.supported_addrs  = sctp_inet_supported_addrs,
	.create_accept_sk = sctp_v4_create_accept_sk,
	.addr_to_user	  = sctp_v4_addr_to_user,
	.to_sk_saddr	  = sctp_v4_to_sk_saddr,
	.to_sk_daddr	  = sctp_v4_to_sk_daddr,
	.copy_ip_options  = sctp_v4_copy_ip_options,
	.af		  = &sctp_af_inet,
};

/* Notifier for inetaddr addition/deletion events.  */
static struct notifier_block sctp_inetaddr_notifier = {
	.notifier_call = sctp_inetaddr_event,
};

/* Socket operations.  */
static const struct proto_ops inet_seqpacket_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,	/* Needs to be wrapped... */
	.bind		   = inet_bind,
	.connect	   = sctp_inet_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet_getname,	/* Semantics are different.  */
	.poll		   = sctp_poll,
	.ioctl		   = inet_ioctl,
	.listen		   = sctp_inet_listen,
	.shutdown	   = inet_shutdown,	/* Looks harmless.  */
	.setsockopt	   = sock_common_setsockopt, /* IP_SOL IP_OPTION is a problem */
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

/* Registration with AF_INET family.  */
static struct inet_protosw sctp_seqpacket_protosw = {
	.type       = SOCK_SEQPACKET,
	.protocol   = IPPROTO_SCTP,
	.prot       = &sctp_prot,
	.ops        = &inet_seqpacket_ops,
	.flags      = SCTP_PROTOSW_FLAG
};
static struct inet_protosw sctp_stream_protosw = {
	.type       = SOCK_STREAM,
	.protocol   = IPPROTO_SCTP,
	.prot       = &sctp_prot,
	.ops        = &inet_seqpacket_ops,
	.flags      = SCTP_PROTOSW_FLAG
};

/* Register with IP layer.  */
static const struct net_protocol sctp_protocol = {
	.handler     = sctp_rcv,
	.err_handler = sctp_v4_err,
	.no_policy   = 1,
	.netns_ok    = 1,
	.icmp_strict_tag_validation = 1,
};
/* IPv4 address related functions.  */
static struct sctp_af sctp_af_inet = {
	.sa_family	   = AF_INET,
	.sctp_xmit	   = sctp_v4_xmit,
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.get_dst	   = sctp_v4_get_dst,
	.get_saddr	   = sctp_v4_get_saddr,
	.copy_addrlist	   = sctp_v4_copy_addrlist,
	.from_skb	   = sctp_v4_from_skb,
	.from_sk	   = sctp_v4_from_sk,
	.from_addr_param   = sctp_v4_from_addr_param,
	.to_addr_param	   = sctp_v4_to_addr_param,
	.cmp_addr	   = sctp_v4_cmp_addr,
	.addr_valid	   = sctp_v4_addr_valid,
	.inaddr_any	   = sctp_v4_inaddr_any,
	.is_any		   = sctp_v4_is_any,
	.available	   = sctp_v4_available,
	.scope		   = sctp_v4_scope,
	.skb_iif	   = sctp_v4_skb_iif,
	.is_ce		   = sctp_v4_is_ce,
	.seq_dump_addr	   = sctp_v4_seq_dump_addr,
	.ecn_capable	   = sctp_v4_ecn_capable,
	.net_header_len	   = sizeof(struct iphdr),
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.ip_options_len	   = sctp_v4_ip_options_len,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

struct sctp_pf *sctp_get_pf_specific(sa_family_t family)
{
	switch (family) {
	case PF_INET:
		return sctp_pf_inet_specific;
	case PF_INET6:
		return sctp_pf_inet6_specific;
	default:
		return NULL;
	}
}

/* Register the PF specific function table.  */
int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
{
	switch (family) {
	case PF_INET:
		if (sctp_pf_inet_specific)
			return 0;
		sctp_pf_inet_specific = pf;
		break;
	case PF_INET6:
		if (sctp_pf_inet6_specific)
			return 0;
		sctp_pf_inet6_specific = pf;
		break;
	default:
		return 0;
	}
	return 1;
}
static inline int init_sctp_mibs(struct net *net)
{
	net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib);
	if (!net->sctp.sctp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void cleanup_sctp_mibs(struct net *net)
{
	free_percpu(net->sctp.sctp_statistics);
}

static void sctp_v4_pf_init(void)
{
	/* Initialize the SCTP specific PF functions. */
	sctp_register_pf(&sctp_pf_inet, PF_INET);
	sctp_register_af(&sctp_af_inet);
}

static void sctp_v4_pf_exit(void)
{
	list_del(&sctp_af_inet.list);
}

static int sctp_v4_protosw_init(void)
{
	int rc;

	rc = proto_register(&sctp_prot, 1);
	if (rc)
		return rc;

	/* Register SCTP(UDP and TCP style) with socket layer.  */
	inet_register_protosw(&sctp_seqpacket_protosw);
	inet_register_protosw(&sctp_stream_protosw);

	return 0;
}

static void sctp_v4_protosw_exit(void)
{
	inet_unregister_protosw(&sctp_stream_protosw);
	inet_unregister_protosw(&sctp_seqpacket_protosw);
	proto_unregister(&sctp_prot);
}

static int sctp_v4_add_protocol(void)
{
	/* Register notifier for inet address additions/deletions. */
	register_inetaddr_notifier(&sctp_inetaddr_notifier);

	/* Register SCTP with inet layer.  */
	if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0)
		return -EAGAIN;

	return 0;
}

static void sctp_v4_del_protocol(void)
{
	inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
	unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
}
static int __net_init sctp_defaults_init(struct net *net)
{
	int status;

	/*
	 * 14. Suggested SCTP Protocol Parameter Values
	 */
	/* The following protocol parameters are RECOMMENDED:  */
	/* RTO.Initial              - 3  seconds */
	net->sctp.rto_initial = SCTP_RTO_INITIAL;
	/* RTO.Min                  - 1  second */
	net->sctp.rto_min = SCTP_RTO_MIN;
	/* RTO.Max                  - 60 seconds */
	net->sctp.rto_max = SCTP_RTO_MAX;
	/* RTO.Alpha                - 1/8 */
	net->sctp.rto_alpha = SCTP_RTO_ALPHA;
	/* RTO.Beta                 - 1/4 */
	net->sctp.rto_beta = SCTP_RTO_BETA;

	/* Valid.Cookie.Life        - 60 seconds */
	net->sctp.valid_cookie_life = SCTP_DEFAULT_COOKIE_LIFE;

	/* Whether Cookie Preservative is enabled(1) or not(0) */
	net->sctp.cookie_preserve_enable = 1;

	/* Default sctp sockets to use md5 as their hmac alg */
#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
	net->sctp.sctp_hmac_alg = "md5";
#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
	net->sctp.sctp_hmac_alg = "sha1";
#else
	net->sctp.sctp_hmac_alg = NULL;
#endif

	net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST;

	/* Enable pf state by default */
	net->sctp.pf_enable = 1;

	/* Association.Max.Retrans  - 10 attempts
	 * Path.Max.Retrans         - 5  attempts (per destination address)
	 * Max.Init.Retransmits     - 8  attempts
	 */
	net->sctp.max_retrans_association = 10;
	net->sctp.max_retrans_path = 5;
	net->sctp.max_retrans_init = 8;

	/* Sendbuffer growth       - do per-socket accounting */
	net->sctp.sndbuf_policy = 0;

	/* Rcvbuffer growth        - do per-socket accounting */
	net->sctp.rcvbuf_policy = 0;

	/* HB.interval             - 30 seconds */
	net->sctp.hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;

	/* delayed SACK timeout */
	net->sctp.sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK;

	/* Disable ADDIP by default. */
	net->sctp.addip_enable = 0;
	net->sctp.addip_noauth = 0;
	net->sctp.default_auto_asconf = 0;

	/* Enable PR-SCTP by default. */
	net->sctp.prsctp_enable = 1;

	/* Disable RECONF by default. */
	net->sctp.reconf_enable = 0;

	/* Disable AUTH by default. */
	net->sctp.auth_enable = 0;

	/* Set SCOPE policy to enabled */
	net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;

	/* Set the default rwnd update threshold */
	net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;

	/* Initialize maximum autoclose timeout. */
	net->sctp.max_autoclose = INT_MAX / HZ;
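
	/* Capping at INT_MAX / HZ keeps a later seconds-to-jiffies conversion
	 * (autoclose * HZ) from overflowing an int; with HZ=1000 that is
	 * roughly 2147483 seconds, i.e. about 24.8 days (illustrative figures).
	 */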
	status = sctp_sysctl_net_register(net);
	if (status)
		goto err_sysctl_register;

	/* Allocate and initialise sctp mibs.  */
	status = init_sctp_mibs(net);
	if (status)
		goto err_init_mibs;

#ifdef CONFIG_PROC_FS
	/* Initialize proc fs directory.  */
	status = sctp_proc_init(net);
	if (status)
		goto err_init_proc;
#endif

	sctp_dbg_objcnt_init(net);

	/* Initialize the local address list. */
	INIT_LIST_HEAD(&net->sctp.local_addr_list);
	spin_lock_init(&net->sctp.local_addr_lock);
	sctp_get_local_addr_list(net);

	/* Initialize the address event list */
	INIT_LIST_HEAD(&net->sctp.addr_waitq);
	INIT_LIST_HEAD(&net->sctp.auto_asconf_splist);
	spin_lock_init(&net->sctp.addr_wq_lock);
	net->sctp.addr_wq_timer.expires = 0;
	timer_setup(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler, 0);

	return 0;

#ifdef CONFIG_PROC_FS
err_init_proc:
	cleanup_sctp_mibs(net);
#endif
err_init_mibs:
	sctp_sysctl_net_unregister(net);
err_sysctl_register:
	return status;
}

static void __net_exit sctp_defaults_exit(struct net *net)
{
	/* Free the local address list */
	sctp_free_addr_wq(net);
	sctp_free_local_addr_list(net);

#ifdef CONFIG_PROC_FS
	remove_proc_subtree("sctp", net->proc_net);
	net->sctp.proc_net_sctp = NULL;
#endif
	cleanup_sctp_mibs(net);
	sctp_sysctl_net_unregister(net);
}

static struct pernet_operations sctp_defaults_ops = {
	.init = sctp_defaults_init,
	.exit = sctp_defaults_exit,
};
static int __net_init sctp_ctrlsock_init(struct net *net)
{
	int status;

	/* Initialize the control inode/socket for handling OOTB packets.  */
	status = sctp_ctl_sock_init(net);
	if (status)
		pr_err("Failed to initialize the SCTP control sock\n");

	return status;
}

static void __net_init sctp_ctrlsock_exit(struct net *net)
{
	/* Free the control endpoint.  */
	inet_ctl_sock_destroy(net->sctp.ctl_sock);
}

static struct pernet_operations sctp_ctrlsock_ops = {
	.init = sctp_ctrlsock_init,
	.exit = sctp_ctrlsock_exit,
};
/* Initialize the universe into something sensible.  */
static __init int sctp_init(void)
{
	int i;
	int status = -EINVAL;
	unsigned long goal;
	unsigned long limit;
	int max_share;
	int order;
	int num_entries;
	int max_entry_order;

	sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));

	/* Allocate bind_bucket and chunk caches. */
	status = -ENOBUFS;
	sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
					       sizeof(struct sctp_bind_bucket),
					       0, SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!sctp_bucket_cachep)
		goto out;

	sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
					      sizeof(struct sctp_chunk),
					      0, SLAB_HWCACHE_ALIGN,
					      NULL);
	if (!sctp_chunk_cachep)
		goto err_chunk_cachep;

	status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL);
	if (status)
		goto err_percpu_counter_init;

	/* Implementation specific variables. */

	/* Initialize default stream count setup information. */
	sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
	sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;

	/* Initialize handle used for association ids. */
	idr_init(&sctp_assocs_id);

	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_sctp_mem[0] = limit / 4 * 3;
	sysctl_sctp_mem[1] = limit;
	sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2;

	/* Set per-socket limits to no more than 1/128 the pressure threshold*/
	limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
	sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
	sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);

	sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_sctp_wmem[1] = 16*1024;
	sysctl_sctp_wmem[2] = max(64*1024, max_share);
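
	/* Worked example (illustrative, 4 KiB pages): with ~2 GiB of buffer
	 * pages, nr_free_buffer_pages()/8 is about 64k pages, so sctp_mem
	 * becomes roughly {48k, 64k, 96k} pages; the per-socket cap is then
	 * min(4 MiB, 64k << (12 - 7) bytes = 2 MiB) = 2 MiB for rmem[2]/wmem[2].
	 */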
	/* Size and allocate the association hash table.
	 * The methodology is similar to that of the tcp hash tables.
	 * Though not identical.  Start by getting a goal size
	 */
	if (totalram_pages >= (128 * 1024))
		goal = totalram_pages >> (22 - PAGE_SHIFT);
	else
		goal = totalram_pages >> (24 - PAGE_SHIFT);

	/* Then compute the page order for said goal */
	order = get_order(goal);

	/* Now compute the required page order for the maximum sized table we
	 * want to build
	 */
	max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
				    sizeof(struct sctp_bind_hashbucket));

	/* Limit the page order by that maximum hash table size */
	order = min(order, max_entry_order);

	/* Allocate and initialize the endpoint hash table.  */
	sctp_ep_hashsize = 64;
	sctp_ep_hashtable =
		kmalloc_array(64, sizeof(struct sctp_hashbucket), GFP_KERNEL);
	if (!sctp_ep_hashtable) {
		pr_err("Failed endpoint_hash alloc\n");
		status = -ENOMEM;
		goto err_ehash_alloc;
	}
	for (i = 0; i < sctp_ep_hashsize; i++) {
		rwlock_init(&sctp_ep_hashtable[i].lock);
		INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
	}

	/* Allocate and initialize the SCTP port hash table.
	 * Note that order is initialized to start at the max sized
	 * table we want to support.  If we can't get that many pages
	 * reduce the order and try again
	 */
	do {
		sctp_port_hashtable = (struct sctp_bind_hashbucket *)
			__get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
	} while (!sctp_port_hashtable && --order > 0);

	if (!sctp_port_hashtable) {
		pr_err("Failed bind hash alloc\n");
		status = -ENOMEM;
		goto err_bhash_alloc;
	}

	/* Now compute the number of entries that will fit in the
	 * port hash space we allocated
	 */
	num_entries = (1UL << order) * PAGE_SIZE /
		      sizeof(struct sctp_bind_hashbucket);

	/* And finish by rounding it down to the nearest power of two.
	 * This wastes some memory of course, but it's needed because
	 * the hash function operates based on the assumption that
	 * the number of entries is a power of two.
	 */
	sctp_port_hashsize = rounddown_pow_of_two(num_entries);
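
	/* For instance (illustrative, assuming a 16-byte struct
	 * sctp_bind_hashbucket and 4 KiB pages): an order-0 allocation holds
	 * 256 buckets, already a power of two; the rounddown only discards
	 * entries when the bucket size does not divide the allocation into a
	 * power-of-two count.
	 */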
	for (i = 0; i < sctp_port_hashsize; i++) {
		spin_lock_init(&sctp_port_hashtable[i].lock);
		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
	}

	status = sctp_transport_hashtable_init();
	if (status)
		goto err_thash_alloc;

	pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
		num_entries);

	sctp_sysctl_register();

	INIT_LIST_HEAD(&sctp_address_families);
	sctp_v4_pf_init();
	sctp_v6_pf_init();
	sctp_sched_ops_init();

	status = register_pernet_subsys(&sctp_defaults_ops);
	if (status)
		goto err_register_defaults;

	status = sctp_v4_protosw_init();
	if (status)
		goto err_protosw_init;

	status = sctp_v6_protosw_init();
	if (status)
		goto err_v6_protosw_init;

	status = register_pernet_subsys(&sctp_ctrlsock_ops);
	if (status)
		goto err_register_ctrlsock;

	status = sctp_v4_add_protocol();
	if (status)
		goto err_add_protocol;

	/* Register SCTP with inet6 layer.  */
	status = sctp_v6_add_protocol();
	if (status)
		goto err_v6_add_protocol;

	if (sctp_offload_init() < 0)
		pr_crit("%s: Cannot add SCTP protocol offload\n", __func__);

out:
	return status;
err_v6_add_protocol:
	sctp_v4_del_protocol();
err_add_protocol:
	unregister_pernet_subsys(&sctp_ctrlsock_ops);
err_register_ctrlsock:
	sctp_v6_protosw_exit();
err_v6_protosw_init:
	sctp_v4_protosw_exit();
err_protosw_init:
	unregister_pernet_subsys(&sctp_defaults_ops);
err_register_defaults:
	sctp_v6_pf_exit();
	sctp_v4_pf_exit();
	sctp_sysctl_unregister();
	free_pages((unsigned long)sctp_port_hashtable,
		   get_order(sctp_port_hashsize *
			     sizeof(struct sctp_bind_hashbucket)));
err_bhash_alloc:
	sctp_transport_hashtable_destroy();
err_thash_alloc:
	kfree(sctp_ep_hashtable);
err_ehash_alloc:
	percpu_counter_destroy(&sctp_sockets_allocated);
err_percpu_counter_init:
	kmem_cache_destroy(sctp_chunk_cachep);
err_chunk_cachep:
	kmem_cache_destroy(sctp_bucket_cachep);
	goto out;
}
/* Exit handler for the SCTP protocol.  */
static __exit void sctp_exit(void)
{
	/* BUG.  This should probably do something useful like clean
	 * up all the remaining associations and all that memory.
	 */

	/* Unregister with inet6/inet layers. */
	sctp_v6_del_protocol();
	sctp_v4_del_protocol();

	unregister_pernet_subsys(&sctp_ctrlsock_ops);

	/* Free protosw registrations */
	sctp_v6_protosw_exit();
	sctp_v4_protosw_exit();

	unregister_pernet_subsys(&sctp_defaults_ops);

	/* Unregister with socket layer. */
	sctp_v6_pf_exit();
	sctp_v4_pf_exit();

	sctp_sysctl_unregister();

	free_pages((unsigned long)sctp_port_hashtable,
		   get_order(sctp_port_hashsize *
			     sizeof(struct sctp_bind_hashbucket)));
	kfree(sctp_ep_hashtable);
	sctp_transport_hashtable_destroy();

	percpu_counter_destroy(&sctp_sockets_allocated);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(sctp_chunk_cachep);
	kmem_cache_destroy(sctp_bucket_cachep);
}

module_init(sctp_init);
module_exit(sctp_exit);
/* __stringify doesn't like enums, so use IPPROTO_SCTP value (132) directly. */
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
MODULE_LICENSE("GPL");
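
/* Usage note (illustrative): the checksum escape hatch above can be set at
 * load time, e.g. "modprobe sctp no_checksums=1", or flipped at runtime via
 * /sys/module/sctp/parameters/no_checksums when SCTP is built as a module.
 */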