// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * Initialization/cleanup for SCTP protocol support.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/sctp/sctp.h>
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_ecn.h>

#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)

/* Global data structures. */
struct sctp_globals sctp_globals __read_mostly;

struct idr sctp_assocs_id;
DEFINE_SPINLOCK(sctp_assocs_id_lock);

static struct sctp_pf *sctp_pf_inet6_specific;
static struct sctp_pf *sctp_pf_inet_specific;
static struct sctp_af *sctp_af_v4_specific;
static struct sctp_af *sctp_af_v6_specific;

struct kmem_cache *sctp_chunk_cachep __read_mostly;
struct kmem_cache *sctp_bucket_cachep __read_mostly;

long sysctl_sctp_mem[3];
int sysctl_sctp_rmem[3];
int sysctl_sctp_wmem[3];
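/* The three-element arrays above follow the usual kernel memory-sysctl
 * conventions: sysctl_sctp_mem is { low, pressure, high } counted in pages
 * and shared by the whole protocol, while sysctl_sctp_rmem/sysctl_sctp_wmem
 * are per-socket { min, default, max } limits in bytes.  Their values are
 * computed at load time in sctp_init() below.
 */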

/* Private helper to extract ipv4 addresses and stash them in
 * the protocol structure.
 */
static void sctp_v4_copy_addrlist(struct list_head *addrlist,
				  struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	struct sctp_sockaddr_entry *addr;

	rcu_read_lock();
	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
		rcu_read_unlock();
		return;
	}

	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
		/* Add the address to the local list. */
		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
		if (addr) {
			addr->a.v4.sin_family = AF_INET;
			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
			INIT_LIST_HEAD(&addr->list);
			list_add_tail(&addr->list, addrlist);
		}
	}

	rcu_read_unlock();
}

/* Extract our IP addresses from the system and stash them in the
 * protocol structure.
 */
static void sctp_get_local_addr_list(struct net *net)
{
	struct net_device *dev;
	struct list_head *pos;
	struct sctp_af *af;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		list_for_each(pos, &sctp_address_families) {
			af = list_entry(pos, struct sctp_af, list);
			af->copy_addrlist(&net->sctp.local_addr_list, dev);
		}
	}
	rcu_read_unlock();
}

/* Free the existing local addresses. */
static void sctp_free_local_addr_list(struct net *net)
{
	struct sctp_sockaddr_entry *addr;
	struct list_head *pos, *temp;

	list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
		list_del(pos);
		kfree(addr);
	}
}

/* Copy the local addresses which are valid for 'scope' into 'bp'. */
int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
			      enum sctp_scope scope, gfp_t gfp, int copy_flags)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr laddr;
	int error = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
		if (!sctp_in_scope(net, &addr->a, scope))
			continue;

		/* Now that the address is in scope, check to see if
		 * the address type is really supported by the local
		 * sock as well as the remote peer.
		 */
		if (addr->a.sa.sa_family == AF_INET &&
		    !(copy_flags & SCTP_ADDR4_PEERSUPP))
			continue;
		if (addr->a.sa.sa_family == AF_INET6 &&
		    (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
		     !(copy_flags & SCTP_ADDR6_PEERSUPP)))
			continue;

		laddr = addr->a;
		/* also works for setting ipv6 address port */
		laddr.v4.sin_port = htons(bp->port);
		if (sctp_bind_addr_state(bp, &laddr) != -1)
			continue;

		error = sctp_add_bind_addr(bp, &addr->a, sizeof(addr->a),
					   SCTP_ADDR_SRC, GFP_ATOMIC);
		if (error)
			break;
	}

	rcu_read_unlock();
	return error;
}

/* Copy over any ip options */
static void sctp_v4_copy_ip_options(struct sock *sk, struct sock *newsk)
{
	struct inet_sock *newinet, *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt, *newopt = NULL;

	newinet = inet_sk(newsk);

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt) {
		newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
				      inet_opt->opt.optlen, GFP_ATOMIC);
		if (newopt)
			memcpy(newopt, inet_opt, sizeof(*inet_opt) +
			       inet_opt->opt.optlen);
		else
			pr_err("%s: Failed to copy ip options\n", __func__);
	}
	RCU_INIT_POINTER(newinet->inet_opt, newopt);
	rcu_read_unlock();
}

/* Account for the IP options */
static int sctp_v4_ip_options_len(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt;
	int len = 0;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt)
		len = inet_opt->opt.optlen;
	rcu_read_unlock();

	return len;
}

/* Initialize a sctp_addr from an incoming skb. */
static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
			     int is_saddr)
{
	/* Always called on head skb, so this is safe */
	struct sctphdr *sh = sctp_hdr(skb);
	struct sockaddr_in *sa = &addr->v4;

	addr->v4.sin_family = AF_INET;

	if (is_saddr) {
		sa->sin_port = sh->source;
		sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
	} else {
		sa->sin_port = sh->dest;
		sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
	}
}

/* Initialize an sctp_addr from a socket. */
static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_port = 0;
	addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
}

/* Initialize sk->sk_rcv_saddr from sctp_addr. */
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr;
}

/* Initialize sk->sk_daddr from sctp_addr. */
static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr;
}

/* Initialize a sctp_addr from an address parameter. */
static void sctp_v4_from_addr_param(union sctp_addr *addr,
				    union sctp_addr_param *param,
				    __be16 port, int iif)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_port = port;
	addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
}

/* Initialize an address parameter from a sctp_addr and return the length
 * of the address parameter.
 */
static int sctp_v4_to_addr_param(const union sctp_addr *addr,
				 union sctp_addr_param *param)
{
	int length = sizeof(struct sctp_ipv4addr_param);

	param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS;
	param->v4.param_hdr.length = htons(length);
	param->v4.addr.s_addr = addr->v4.sin_addr.s_addr;

	return length;
}

/* Initialize a sctp_addr from a dst_entry. */
static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
			      __be16 port)
{
	saddr->v4.sin_family = AF_INET;
	saddr->v4.sin_port = port;
	saddr->v4.sin_addr.s_addr = fl4->saddr;
}

/* Compare two addresses exactly. */
static int sctp_v4_cmp_addr(const union sctp_addr *addr1,
			    const union sctp_addr *addr2)
{
	if (addr1->sa.sa_family != addr2->sa.sa_family)
		return 0;
	if (addr1->v4.sin_port != addr2->v4.sin_port)
		return 0;
	if (addr1->v4.sin_addr.s_addr != addr2->v4.sin_addr.s_addr)
		return 0;

	return 1;
}

/* Initialize addr struct to INADDR_ANY. */
static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
	addr->v4.sin_port = port;
}

/* Is this a wildcard address? */
static int sctp_v4_is_any(const union sctp_addr *addr)
{
	return htonl(INADDR_ANY) == addr->v4.sin_addr.s_addr;
}

/* This function checks if the address is a valid address to be used for
 * SCTP binding.
 *
 * Output:
 * Return 0 - If the address is a non-unicast or an illegal address.
 * Return 1 - If the address is a unicast.
 */
static int sctp_v4_addr_valid(union sctp_addr *addr,
			      struct sctp_sock *sp,
			      const struct sk_buff *skb)
{
	/* IPv4 addresses not allowed */
	if (sp && ipv6_only_sock(sctp_opt2sk(sp)))
		return 0;

	/* Is this a non-unicast address or an unusable SCTP address? */
	if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr))
		return 0;

	/* Is this a broadcast address? */
	if (skb && skb_rtable(skb)->rt_flags & RTCF_BROADCAST)
		return 0;

	return 1;
}

/* Should this be available for binding? */
static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
{
	struct net *net = sock_net(&sp->inet.sk);
	int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr);

	if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
	    ret != RTN_LOCAL &&
	    !sp->inet.freebind &&
	    !net->ipv4.sysctl_ip_nonlocal_bind)
		return 0;

	if (ipv6_only_sock(sctp_opt2sk(sp)))
		return 0;

	return 1;
}

/* Checking the loopback, private and other address scopes as defined in
 * RFC 1918.  The IPv4 scoping is based on the draft for SCTP IPv4
 * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
 *
 * Level 0 - unusable SCTP addresses
 * Level 1 - loopback address
 * Level 2 - link-local addresses
 * Level 3 - private addresses.
 * Level 4 - global addresses
 * For INIT and INIT-ACK address list, let L be the level of
 * the requested destination address, sender and receiver
 * SHOULD include all of its addresses with level greater
 * than or equal to L.
 *
 * IPv4 scoping can be controlled through sysctl option
 * net.sctp.addr_scope_policy
 */
static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
{
	enum sctp_scope retval;

	/* Check for unusable SCTP addresses. */
	if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_UNUSABLE;
	} else if (ipv4_is_loopback(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_LOOPBACK;
	} else if (ipv4_is_linklocal_169(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_LINK;
	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_PRIVATE;
	} else {
		retval = SCTP_SCOPE_GLOBAL;
	}

	return retval;
}
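
/* A few concrete examples of the mapping above, derived purely from the
 * checks in sctp_v4_scope() and shown only for illustration:
 *   127.0.0.1                          -> SCTP_SCOPE_LOOPBACK
 *   169.254.0.5                        -> SCTP_SCOPE_LINK
 *   10.0.0.1, 172.16.0.1, 192.168.1.1  -> SCTP_SCOPE_PRIVATE
 *   any other unicast address, e.g. 192.0.2.1 -> SCTP_SCOPE_GLOBAL
 */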

/* Returns a valid dst cache entry for the given source and destination ip
 * addresses. If an association is passed, tries to get a dst entry with a
 * source address that matches an address in the bind address list.
 */
static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
			    struct flowi *fl, struct sock *sk)
{
	struct sctp_association *asoc = t->asoc;
	struct rtable *rt;
	struct flowi4 *fl4 = &fl->u.ip4;
	struct sctp_bind_addr *bp;
	struct sctp_sockaddr_entry *laddr;
	struct dst_entry *dst = NULL;
	union sctp_addr *daddr = &t->ipaddr;
	union sctp_addr dst_saddr;
	__u8 tos = inet_sk(sk)->tos;

	if (t->dscp & SCTP_DSCP_SET_MASK)
		tos = t->dscp & SCTP_DSCP_VAL_MASK;
	memset(fl4, 0x0, sizeof(struct flowi4));
	fl4->daddr = daddr->v4.sin_addr.s_addr;
	fl4->fl4_dport = daddr->v4.sin_port;
	fl4->flowi4_proto = IPPROTO_SCTP;
	if (asoc) {
		fl4->flowi4_tos = RT_CONN_FLAGS_TOS(asoc->base.sk, tos);
		fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
		fl4->fl4_sport = htons(asoc->base.bind_addr.port);
	}
	if (saddr) {
		fl4->saddr = saddr->v4.sin_addr.s_addr;
		if (!fl4->fl4_sport)
			fl4->fl4_sport = saddr->v4.sin_port;
	}

	pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
		 &fl4->saddr);

	rt = ip_route_output_key(sock_net(sk), fl4);
	if (!IS_ERR(rt))
		dst = &rt->dst;

	/* If there is no association or if a source address is passed, no
	 * more validation is required.
	 */
	if (!asoc || saddr)
		goto out;

	bp = &asoc->base.bind_addr;

	if (dst) {
		/* Walk through the bind address list and look for a bind
		 * address that matches the source address of the returned dst.
		 */
		sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
		rcu_read_lock();
		list_for_each_entry_rcu(laddr, &bp->address_list, list) {
			if (!laddr->valid || (laddr->state == SCTP_ADDR_DEL) ||
			    (laddr->state != SCTP_ADDR_SRC &&
			     !asoc->src_out_of_asoc_ok))
				continue;
			if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
				goto out_unlock;
		}
		rcu_read_unlock();

		/* None of the bound addresses match the source address of the
		 * dst. So release it.
		 */
		dst_release(dst);
		dst = NULL;
	}

	/* Walk through the bind address list and try to get a dst that
	 * matches a bind address as the source address.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
		struct net_device *odev;

		if (laddr->state != SCTP_ADDR_SRC ||
		    AF_INET != laddr->a.sa.sa_family)
			continue;

		fl4->fl4_sport = laddr->a.v4.sin_port;
		flowi4_update_output(fl4,
				     asoc->base.sk->sk_bound_dev_if,
				     RT_CONN_FLAGS_TOS(asoc->base.sk, tos),
				     daddr->v4.sin_addr.s_addr,
				     laddr->a.v4.sin_addr.s_addr);

		rt = ip_route_output_key(sock_net(sk), fl4);
		if (IS_ERR(rt))
			continue;

		/* Ensure the src address belongs to the output
		 * interface.
		 */
		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
				     false);
		if (!odev || odev->ifindex != fl4->flowi4_oif) {
			if (!dst)
				dst = &rt->dst;
			else
				dst_release(&rt->dst);
			continue;
		}

		dst_release(dst);
		dst = &rt->dst;
		break;
	}

out_unlock:
	rcu_read_unlock();
out:
	t->dst = dst;
	if (dst) {
		pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
			 &fl4->daddr, &fl4->saddr);
	} else {
		pr_debug("no route\n");
	}
}

/* For v4, the source address is cached in the route entry(dst). So no need
 * to cache it separately and hence this is an empty routine.
 */
static void sctp_v4_get_saddr(struct sctp_sock *sk,
			      struct sctp_transport *t,
			      struct flowi *fl)
{
	union sctp_addr *saddr = &t->saddr;
	struct rtable *rt = (struct rtable *)t->dst;

	if (rt) {
		saddr->v4.sin_family = AF_INET;
		saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr;
	}
}

/* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
	return inet_iif(skb);
}

/* Was this packet marked by Explicit Congestion Notification? */
static int sctp_v4_is_ce(const struct sk_buff *skb)
{
	return INET_ECN_is_ce(ip_hdr(skb)->tos);
}

/* Create and initialize a new sk for the socket returned by accept(). */
static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
					     struct sctp_association *asoc,
					     bool kern)
{
	struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
				      sk->sk_prot, kern);
	struct inet_sock *newinet;

	if (!newsk)
		goto out;

	sock_init_data(NULL, newsk);

	sctp_copy_sock(newsk, sk, asoc);
	sock_reset_flag(newsk, SOCK_ZAPPED);

	sctp_v4_copy_ip_options(sk, newsk);

	newinet = inet_sk(newsk);

	newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;

	sk_refcnt_debug_inc(newsk);

	if (newsk->sk_prot->init(newsk)) {
		sk_common_release(newsk);
		newsk = NULL;
	}

out:
	return newsk;
}

static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
{
	/* No address mapping for V4 sockets */
	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
	return sizeof(struct sockaddr_in);
}

/* Dump the v4 addr to the seq file. */
static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
{
	seq_printf(seq, "%pI4 ", &addr->v4.sin_addr);
}

static void sctp_v4_ecn_capable(struct sock *sk)
{
	INET_ECN_xmit(sk);
}

static void sctp_addr_wq_timeout_handler(struct timer_list *t)
{
	struct net *net = from_timer(net, t, sctp.addr_wq_timer);
	struct sctp_sockaddr_entry *addrw, *temp;
	struct sctp_sock *sp;

	spin_lock_bh(&net->sctp.addr_wq_lock);

	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
		pr_debug("%s: the first ent in wq:%p is addr:%pISc for cmd:%d at "
			 "entry:%p\n", __func__, &net->sctp.addr_waitq, &addrw->a.sa,
			 addrw->state, addrw);

#if IS_ENABLED(CONFIG_IPV6)
		/* Now we send an ASCONF for each association */
		/* Note. we currently don't handle link local IPv6 addresses */
		if (addrw->a.sa.sa_family == AF_INET6) {
			struct in6_addr *in6;

			if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
			    IPV6_ADDR_LINKLOCAL)
				goto free_next;

			in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
			if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
			    addrw->state == SCTP_ADDR_NEW) {
				unsigned long timeo_val;

				pr_debug("%s: this is on DAD, trying %d sec "
					 "later\n", __func__,
					 SCTP_ADDRESS_TICK_DELAY);

				timeo_val = jiffies;
				timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
				mod_timer(&net->sctp.addr_wq_timer, timeo_val);
				break;
			}
		}
#endif
		list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
			struct sock *sk;

			sk = sctp_opt2sk(sp);
			/* ignore bound-specific endpoints */
			if (!sctp_is_ep_boundall(sk))
				continue;
			if (sctp_asconf_mgmt(sp, addrw) < 0)
				pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
		}
#if IS_ENABLED(CONFIG_IPV6)
free_next:
#endif
		list_del(&addrw->list);
		kfree(addrw);
	}
	spin_unlock_bh(&net->sctp.addr_wq_lock);
}

static void sctp_free_addr_wq(struct net *net)
{
	struct sctp_sockaddr_entry *addrw;
	struct sctp_sockaddr_entry *temp;

	spin_lock_bh(&net->sctp.addr_wq_lock);
	del_timer(&net->sctp.addr_wq_timer);
	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
		list_del(&addrw->list);
		kfree(addrw);
	}
	spin_unlock_bh(&net->sctp.addr_wq_lock);
}

/* lookup the entry for the same address in the addr_waitq
 * sctp_addr_wq MUST be locked
 */
static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
					struct sctp_sockaddr_entry *addr)
{
	struct sctp_sockaddr_entry *addrw;

	list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
		if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
			continue;
		if (addrw->a.sa.sa_family == AF_INET) {
			if (addrw->a.v4.sin_addr.s_addr ==
			    addr->a.v4.sin_addr.s_addr)
				return addrw;
		} else if (addrw->a.sa.sa_family == AF_INET6) {
			if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
					    &addr->a.v6.sin6_addr))
				return addrw;
		}
	}
	return NULL;
}

void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
{
	struct sctp_sockaddr_entry *addrw;
	unsigned long timeo_val;

	/* first, we check if an opposite message already exists in the queue.
	 * If we found such message, it is removed.
	 * This operation is a bit stupid, but the DHCP client attaches the
	 * new address after a couple of addition and deletion of that address
	 */

	spin_lock_bh(&net->sctp.addr_wq_lock);
	/* Offsets existing events in addr_wq */
	addrw = sctp_addr_wq_lookup(net, addr);
	if (addrw) {
		if (addrw->state != cmd) {
			pr_debug("%s: offsets existing entry for %d, addr:%pISc "
				 "in wq:%p\n", __func__, addrw->state, &addrw->a.sa,
				 &net->sctp.addr_waitq);

			list_del(&addrw->list);
			kfree(addrw);
		}
		spin_unlock_bh(&net->sctp.addr_wq_lock);
		return;
	}

	/* OK, we have to add the new address to the wait queue */
	addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
	if (addrw == NULL) {
		spin_unlock_bh(&net->sctp.addr_wq_lock);
		return;
	}
	addrw->state = cmd;
	list_add_tail(&addrw->list, &net->sctp.addr_waitq);

	pr_debug("%s: add new entry for cmd:%d, addr:%pISc in wq:%p\n",
		 __func__, addrw->state, &addrw->a.sa, &net->sctp.addr_waitq);

	if (!timer_pending(&net->sctp.addr_wq_timer)) {
		timeo_val = jiffies;
		timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
		mod_timer(&net->sctp.addr_wq_timer, timeo_val);
	}
	spin_unlock_bh(&net->sctp.addr_wq_lock);
}

/* Event handler for inet address addition/deletion events.
 * The sctp_local_addr_list needs to be protected by a spin lock since
 * multiple notifiers (say IPv4 and IPv6) may be running at the same
 * time and thus corrupt the list.
 * The reader side is protected with RCU.
 */
static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
			       void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct sctp_sockaddr_entry *addr = NULL;
	struct sctp_sockaddr_entry *temp;
	struct net *net = dev_net(ifa->ifa_dev->dev);
	int found = 0;

	switch (ev) {
	case NETDEV_UP:
		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
		if (addr) {
			addr->a.v4.sin_family = AF_INET;
			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
			spin_lock_bh(&net->sctp.local_addr_lock);
			list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
			sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
			spin_unlock_bh(&net->sctp.local_addr_lock);
		}
		break;
	case NETDEV_DOWN:
		spin_lock_bh(&net->sctp.local_addr_lock);
		list_for_each_entry_safe(addr, temp,
					 &net->sctp.local_addr_list, list) {
			if (addr->a.sa.sa_family == AF_INET &&
			    addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
				found = 1;
				list_del_rcu(&addr->list);
				break;
			}
		}
		spin_unlock_bh(&net->sctp.local_addr_lock);
		if (found)
			kfree_rcu(addr, rcu);
		break;
	}

	return NOTIFY_DONE;
}

/* Initialize the control inode/socket with a control endpoint data
 * structure.  This endpoint is reserved exclusively for the OOTB processing.
 */
static int sctp_ctl_sock_init(struct net *net)
{
	int err;
	sa_family_t family = PF_INET;

	if (sctp_get_pf_specific(PF_INET6))
		family = PF_INET6;

	err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
				   SOCK_SEQPACKET, IPPROTO_SCTP, net);

	/* If IPv6 socket could not be created, try the IPv4 socket */
	if (err < 0 && family == PF_INET6)
		err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
					   SOCK_SEQPACKET, IPPROTO_SCTP,
					   net);

	if (err < 0) {
		pr_err("Failed to create the SCTP control socket\n");
		return err;
	}
	return 0;
}

/* Register address family specific functions. */
int sctp_register_af(struct sctp_af *af)
{
	switch (af->sa_family) {
	case AF_INET:
		if (sctp_af_v4_specific)
			return 0;
		sctp_af_v4_specific = af;
		break;
	case AF_INET6:
		if (sctp_af_v6_specific)
			return 0;
		sctp_af_v6_specific = af;
		break;
	default:
		return 0;
	}

	INIT_LIST_HEAD(&af->list);
	list_add_tail(&af->list, &sctp_address_families);
	return 1;
}

/* Get the table of functions for manipulating a particular address
 * family.
 */
struct sctp_af *sctp_get_af_specific(sa_family_t family)
{
	switch (family) {
	case AF_INET:
		return sctp_af_v4_specific;
	case AF_INET6:
		return sctp_af_v6_specific;
	default:
		return NULL;
	}
}

/* Common code to initialize an AF_INET msg_name. */
static void sctp_inet_msgname(char *msgname, int *addr_len)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *)msgname;
	*addr_len = sizeof(struct sockaddr_in);
	sin->sin_family = AF_INET;
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
}

/* Copy the primary address of the peer primary address as the msg_name. */
static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
				    int *addr_len)
{
	struct sockaddr_in *sin, *sinfrom;

	if (msgname) {
		struct sctp_association *asoc;

		asoc = event->asoc;
		sctp_inet_msgname(msgname, addr_len);
		sin = (struct sockaddr_in *)msgname;
		sinfrom = &asoc->peer.primary_addr.v4;
		sin->sin_port = htons(asoc->peer.port);
		sin->sin_addr.s_addr = sinfrom->sin_addr.s_addr;
	}
}

/* Initialize and copy out a msgname from an inbound skb. */
static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
{
	if (msgname) {
		struct sctphdr *sh = sctp_hdr(skb);
		struct sockaddr_in *sin = (struct sockaddr_in *)msgname;

		sctp_inet_msgname(msgname, len);
		sin->sin_port = sh->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
	}
}

/* Do we support this AF? */
static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp)
{
	/* PF_INET only supports AF_INET addresses. */
	return AF_INET == family;
}

/* Address matching with wildcards allowed. */
static int sctp_inet_cmp_addr(const union sctp_addr *addr1,
			      const union sctp_addr *addr2,
			      struct sctp_sock *opt)
{
	/* PF_INET only supports AF_INET addresses. */
	if (addr1->sa.sa_family != addr2->sa.sa_family)
		return 0;
	if (htonl(INADDR_ANY) == addr1->v4.sin_addr.s_addr ||
	    htonl(INADDR_ANY) == addr2->v4.sin_addr.s_addr)
		return 1;
	if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr)
		return 1;

	return 0;
}
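
/* Illustration of the wildcard rule above: an endpoint bound to 0.0.0.0
 * (INADDR_ANY) compares equal to any specific IPv4 address, so a wildcard
 * bind overlaps every concrete v4 endpoint of the same family.
 */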

/* Verify that provided sockaddr looks bindable.  Common verification has
 * already been taken care of.
 */
static int sctp_inet_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
{
	return sctp_v4_available(addr, opt);
}

/* Verify that sockaddr looks sendable.  Common verification has already
 * been taken care of.
 */
static int sctp_inet_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
{
	return 1;
}

/* Fill in Supported Address Type information for INIT and INIT-ACK
 * chunks.  Returns number of addresses supported.
 */
static int sctp_inet_supported_addrs(const struct sctp_sock *opt,
				     __be16 *types)
{
	types[0] = SCTP_PARAM_IPV4_ADDRESS;
	return 1;
}

/* Wrapper routine that calls the ip transmit routine. */
static inline int sctp_v4_xmit(struct sk_buff *skb,
			       struct sctp_transport *transport)
{
	struct inet_sock *inet = inet_sk(skb->sk);
	__u8 dscp = inet->tos;

	pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb,
		 skb->len, &transport->fl.u.ip4.saddr,
		 &transport->fl.u.ip4.daddr);

	if (transport->dscp & SCTP_DSCP_SET_MASK)
		dscp = transport->dscp & SCTP_DSCP_VAL_MASK;

	inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
			 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;

	SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);

	return __ip_queue_xmit(&inet->sk, skb, &transport->fl, dscp);
}

static struct sctp_af sctp_af_inet;

static struct sctp_pf sctp_pf_inet = {
	.event_msgname = sctp_inet_event_msgname,
	.skb_msgname   = sctp_inet_skb_msgname,
	.af_supported  = sctp_inet_af_supported,
	.cmp_addr      = sctp_inet_cmp_addr,
	.bind_verify   = sctp_inet_bind_verify,
	.send_verify   = sctp_inet_send_verify,
	.supported_addrs = sctp_inet_supported_addrs,
	.create_accept_sk = sctp_v4_create_accept_sk,
	.addr_to_user  = sctp_v4_addr_to_user,
	.to_sk_saddr   = sctp_v4_to_sk_saddr,
	.to_sk_daddr   = sctp_v4_to_sk_daddr,
	.copy_ip_options = sctp_v4_copy_ip_options,
	.af            = &sctp_af_inet
};

/* Notifier for inetaddr addition/deletion events. */
static struct notifier_block sctp_inetaddr_notifier = {
	.notifier_call = sctp_inetaddr_event,
};

/* Socket operations. */
static const struct proto_ops inet_seqpacket_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,	/* Needs to be wrapped... */
	.bind		   = inet_bind,
	.connect	   = sctp_inet_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet_getname,	/* Semantics are different. */
	.poll		   = sctp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sctp_inet_listen,
	.shutdown	   = inet_shutdown,	/* Looks harmless. */
	.setsockopt	   = sock_common_setsockopt, /* IP_SOL IP_OPTION is a problem */
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

/* Registration with AF_INET family. */
static struct inet_protosw sctp_seqpacket_protosw = {
	.type       = SOCK_SEQPACKET,
	.protocol   = IPPROTO_SCTP,
	.prot       = &sctp_prot,
	.ops        = &inet_seqpacket_ops,
	.flags      = SCTP_PROTOSW_FLAG
};
static struct inet_protosw sctp_stream_protosw = {
	.type       = SOCK_STREAM,
	.protocol   = IPPROTO_SCTP,
	.prot       = &sctp_prot,
	.ops        = &inet_seqpacket_ops,
	.flags      = SCTP_PROTOSW_FLAG
};

/* Register with IP layer. */
static const struct net_protocol sctp_protocol = {
	.handler     = sctp_rcv,
	.err_handler = sctp_v4_err,
	.no_policy   = 1,
	.netns_ok    = 1,
	.icmp_strict_tag_validation = 1,
};

/* IPv4 address related functions. */
static struct sctp_af sctp_af_inet = {
	.sa_family	   = AF_INET,
	.sctp_xmit	   = sctp_v4_xmit,
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.get_dst	   = sctp_v4_get_dst,
	.get_saddr	   = sctp_v4_get_saddr,
	.copy_addrlist	   = sctp_v4_copy_addrlist,
	.from_skb	   = sctp_v4_from_skb,
	.from_sk	   = sctp_v4_from_sk,
	.from_addr_param   = sctp_v4_from_addr_param,
	.to_addr_param	   = sctp_v4_to_addr_param,
	.cmp_addr	   = sctp_v4_cmp_addr,
	.addr_valid	   = sctp_v4_addr_valid,
	.inaddr_any	   = sctp_v4_inaddr_any,
	.is_any		   = sctp_v4_is_any,
	.available	   = sctp_v4_available,
	.scope		   = sctp_v4_scope,
	.skb_iif	   = sctp_v4_skb_iif,
	.is_ce		   = sctp_v4_is_ce,
	.seq_dump_addr	   = sctp_v4_seq_dump_addr,
	.ecn_capable	   = sctp_v4_ecn_capable,
	.net_header_len	   = sizeof(struct iphdr),
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.ip_options_len	   = sctp_v4_ip_options_len,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

struct sctp_pf *sctp_get_pf_specific(sa_family_t family)
{
	switch (family) {
	case PF_INET:
		return sctp_pf_inet_specific;
	case PF_INET6:
		return sctp_pf_inet6_specific;
	default:
		return NULL;
	}
}

/* Register the PF specific function table. */
int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
{
	switch (family) {
	case PF_INET:
		if (sctp_pf_inet_specific)
			return 0;
		sctp_pf_inet_specific = pf;
		break;
	case PF_INET6:
		if (sctp_pf_inet6_specific)
			return 0;
		sctp_pf_inet6_specific = pf;
		break;
	default:
		return 0;
	}
	return 1;
}

static inline int init_sctp_mibs(struct net *net)
{
	net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib);
	if (!net->sctp.sctp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void cleanup_sctp_mibs(struct net *net)
{
	free_percpu(net->sctp.sctp_statistics);
}

static void sctp_v4_pf_init(void)
{
	/* Initialize the SCTP specific PF functions. */
	sctp_register_pf(&sctp_pf_inet, PF_INET);
	sctp_register_af(&sctp_af_inet);
}

static void sctp_v4_pf_exit(void)
{
	list_del(&sctp_af_inet.list);
}

static int sctp_v4_protosw_init(void)
{
	int rc;

	rc = proto_register(&sctp_prot, 1);
	if (rc)
		return rc;

	/* Register SCTP(UDP and TCP style) with socket layer. */
	inet_register_protosw(&sctp_seqpacket_protosw);
	inet_register_protosw(&sctp_stream_protosw);

	return 0;
}

static void sctp_v4_protosw_exit(void)
{
	inet_unregister_protosw(&sctp_stream_protosw);
	inet_unregister_protosw(&sctp_seqpacket_protosw);
	proto_unregister(&sctp_prot);
}

static int sctp_v4_add_protocol(void)
{
	/* Register notifier for inet address additions/deletions. */
	register_inetaddr_notifier(&sctp_inetaddr_notifier);

	/* Register SCTP with inet layer. */
	if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0)
		return -EAGAIN;

	return 0;
}

static void sctp_v4_del_protocol(void)
{
	inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
	unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
}

static int __net_init sctp_defaults_init(struct net *net)
{
	int status;

	/*
	 * 14. Suggested SCTP Protocol Parameter Values
	 */
	/* The following protocol parameters are RECOMMENDED: */
	/* RTO.Initial              - 3  seconds */
	net->sctp.rto_initial			= SCTP_RTO_INITIAL;
	/* RTO.Min                  - 1  second */
	net->sctp.rto_min			= SCTP_RTO_MIN;
	/* RTO.Max                  - 60 seconds */
	net->sctp.rto_max			= SCTP_RTO_MAX;
	/* RTO.Alpha                - 1/8 */
	net->sctp.rto_alpha			= SCTP_RTO_ALPHA;
	/* RTO.Beta                 - 1/4 */
	net->sctp.rto_beta			= SCTP_RTO_BETA;

	/* Valid.Cookie.Life        - 60 seconds */
	net->sctp.valid_cookie_life		= SCTP_DEFAULT_COOKIE_LIFE;

	/* Whether Cookie Preservative is enabled(1) or not(0) */
	net->sctp.cookie_preserve_enable	= 1;

	/* Default sctp sockets to use md5 as their hmac alg */
#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
	net->sctp.sctp_hmac_alg			= "md5";
#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
	net->sctp.sctp_hmac_alg			= "sha1";
#else
	net->sctp.sctp_hmac_alg			= NULL;
#endif

	/* Max.Burst                - 4 */
	net->sctp.max_burst			= SCTP_DEFAULT_MAX_BURST;

	/* Enable pf state by default */
	net->sctp.pf_enable			= 1;

	/* Association.Max.Retrans  - 10 attempts
	 * Path.Max.Retrans         - 5  attempts (per destination address)
	 * Max.Init.Retransmits     - 8  attempts
	 */
	net->sctp.max_retrans_association	= 10;
	net->sctp.max_retrans_path		= 5;
	net->sctp.max_retrans_init		= 8;

	/* Sendbuffer growth        - do per-socket accounting */
	net->sctp.sndbuf_policy			= 0;

	/* Rcvbuffer growth         - do per-socket accounting */
	net->sctp.rcvbuf_policy			= 0;

	/* HB.interval              - 30 seconds */
	net->sctp.hb_interval			= SCTP_DEFAULT_TIMEOUT_HEARTBEAT;

	/* delayed SACK timeout */
	net->sctp.sack_timeout			= SCTP_DEFAULT_TIMEOUT_SACK;

	/* Disable ADDIP by default. */
	net->sctp.addip_enable			= 0;
	net->sctp.addip_noauth			= 0;
	net->sctp.default_auto_asconf		= 0;

	/* Enable PR-SCTP by default. */
	net->sctp.prsctp_enable			= 1;

	/* Disable RECONF by default. */
	net->sctp.reconf_enable			= 0;

	/* Disable AUTH by default. */
	net->sctp.auth_enable			= 0;

	/* Set SCOPE policy to enabled */
	net->sctp.scope_policy			= SCTP_SCOPE_POLICY_ENABLE;

	/* Set the default rwnd update threshold */
	net->sctp.rwnd_upd_shift		= SCTP_DEFAULT_RWND_SHIFT;

	/* Initialize maximum autoclose timeout. */
	net->sctp.max_autoclose			= INT_MAX / HZ;
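	/* The autoclose value is expressed in seconds and later converted to
	 * jiffies by multiplying with HZ, so the INT_MAX / HZ cap above keeps
	 * that conversion from overflowing.
	 */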

	status = sctp_sysctl_net_register(net);
	if (status)
		goto err_sysctl_register;

	/* Allocate and initialise sctp mibs. */
	status = init_sctp_mibs(net);
	if (status)
		goto err_init_mibs;

#ifdef CONFIG_PROC_FS
	/* Initialize proc fs directory. */
	status = sctp_proc_init(net);
	if (status)
		goto err_init_proc;
#endif

	sctp_dbg_objcnt_init(net);

	/* Initialize the local address list. */
	INIT_LIST_HEAD(&net->sctp.local_addr_list);
	spin_lock_init(&net->sctp.local_addr_lock);
	sctp_get_local_addr_list(net);

	/* Initialize the address event list */
	INIT_LIST_HEAD(&net->sctp.addr_waitq);
	INIT_LIST_HEAD(&net->sctp.auto_asconf_splist);
	spin_lock_init(&net->sctp.addr_wq_lock);
	net->sctp.addr_wq_timer.expires = 0;
	timer_setup(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler, 0);

	return 0;

#ifdef CONFIG_PROC_FS
err_init_proc:
	cleanup_sctp_mibs(net);
#endif
err_init_mibs:
	sctp_sysctl_net_unregister(net);
err_sysctl_register:
	return status;
}

static void __net_exit sctp_defaults_exit(struct net *net)
{
	/* Free the local address list */
	sctp_free_addr_wq(net);
	sctp_free_local_addr_list(net);

#ifdef CONFIG_PROC_FS
	remove_proc_subtree("sctp", net->proc_net);
	net->sctp.proc_net_sctp = NULL;
#endif
	cleanup_sctp_mibs(net);
	sctp_sysctl_net_unregister(net);
}

static struct pernet_operations sctp_defaults_ops = {
	.init = sctp_defaults_init,
	.exit = sctp_defaults_exit,
};

static int __net_init sctp_ctrlsock_init(struct net *net)
{
	int status;

	/* Initialize the control inode/socket for handling OOTB packets. */
	status = sctp_ctl_sock_init(net);
	if (status)
		pr_err("Failed to initialize the SCTP control sock\n");

	return status;
}

static void __net_init sctp_ctrlsock_exit(struct net *net)
{
	/* Free the control endpoint. */
	inet_ctl_sock_destroy(net->sctp.ctl_sock);
}

static struct pernet_operations sctp_ctrlsock_ops = {
	.init = sctp_ctrlsock_init,
	.exit = sctp_ctrlsock_exit,
};

/* Initialize the universe into something sensible. */
static __init int sctp_init(void)
{
	int i;
	int status = -EINVAL;
	unsigned long goal;
	unsigned long limit;
	unsigned long nr_pages = totalram_pages();
	int max_share;
	int order;
	int num_entries;
	int max_entry_order;

	sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));

	/* Allocate bind_bucket and chunk caches. */
	status = -ENOBUFS;
	sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
					       sizeof(struct sctp_bind_bucket),
					       0, SLAB_HWCACHE_ALIGN,
					       NULL);
	if (!sctp_bucket_cachep)
		goto out;

	sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
					      sizeof(struct sctp_chunk),
					      0, SLAB_HWCACHE_ALIGN,
					      NULL);
	if (!sctp_chunk_cachep)
		goto err_chunk_cachep;

	status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL);
	if (status)
		goto err_percpu_counter_init;

	/* Implementation specific variables. */

	/* Initialize default stream count setup information. */
	sctp_max_instreams		= SCTP_DEFAULT_INSTREAMS;
	sctp_max_outstreams		= SCTP_DEFAULT_OUTSTREAMS;

	/* Initialize handle used for association ids. */
	idr_init(&sctp_assocs_id);

	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_sctp_mem[0] = limit / 4 * 3;
	sysctl_sctp_mem[1] = limit;
	sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2;

	/* Set per-socket limits to no more than 1/128 the pressure threshold*/
	limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
	sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
	sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);

	sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_sctp_wmem[1] = 16*1024;
	sysctl_sctp_wmem[2] = max(64*1024, max_share);
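	/* Worked example of the arithmetic above (assuming 4 KiB pages and
	 * roughly 1 GiB of free buffer pages, purely for illustration):
	 * limit = 262144 / 8 = 32768 pages, so sctp_mem becomes
	 * { 24576, 32768, 65536 } pages; the per-socket cap is then
	 * min(4 MiB, 32768 << 5 bytes) = 1 MiB, which bounds rmem[2]/wmem[2].
	 */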

	/* Size and allocate the association hash table.
	 * The methodology is similar to that of the tcp hash tables.
	 * Though not identical.  Start by getting a goal size
	 */
	if (nr_pages >= (128 * 1024))
		goal = nr_pages >> (22 - PAGE_SHIFT);
	else
		goal = nr_pages >> (24 - PAGE_SHIFT);

	/* Then compute the page order for said goal */
	order = get_order(goal);

	/* Now compute the required page order for the maximum sized table we
	 * want to create
	 */
	max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
				    sizeof(struct sctp_bind_hashbucket));

	/* Limit the page order by that maximum hash table size */
	order = min(order, max_entry_order);

	/* Allocate and initialize the endpoint hash table. */
	sctp_ep_hashsize = 64;
	sctp_ep_hashtable =
		kmalloc_array(64, sizeof(struct sctp_hashbucket), GFP_KERNEL);
	if (!sctp_ep_hashtable) {
		pr_err("Failed endpoint_hash alloc\n");
		status = -ENOMEM;
		goto err_ehash_alloc;
	}
	for (i = 0; i < sctp_ep_hashsize; i++) {
		rwlock_init(&sctp_ep_hashtable[i].lock);
		INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
	}

	/* Allocate and initialize the SCTP port hash table.
	 * Note that order is initialized to start at the max sized
	 * table we want to support.  If we can't get that many pages
	 * reduce the order and try again
	 */
	do {
		sctp_port_hashtable = (struct sctp_bind_hashbucket *)
			__get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
	} while (!sctp_port_hashtable && --order > 0);

	if (!sctp_port_hashtable) {
		pr_err("Failed bind hash alloc\n");
		status = -ENOMEM;
		goto err_bhash_alloc;
	}

	/* Now compute the number of entries that will fit in the
	 * port hash space we allocated
	 */
	num_entries = (1UL << order) * PAGE_SIZE /
		      sizeof(struct sctp_bind_hashbucket);

	/* And finish by rounding it down to the nearest power of two.
	 * This wastes some memory of course, but it's needed because
	 * the hash function operates based on the assumption that
	 * the number of entries is a power of two.
	 */
	sctp_port_hashsize = rounddown_pow_of_two(num_entries);
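	/* Worked example of the sizing above: with 4 KiB pages and order = 1,
	 * (1UL << order) * PAGE_SIZE gives 8192 bytes of bucket space; if a
	 * struct sctp_bind_hashbucket happened to occupy 24 bytes, that would
	 * fit 341 entries, which rounddown_pow_of_two() trims to 256 so the
	 * bind hash can keep using a cheap power-of-two mask.
	 */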

	for (i = 0; i < sctp_port_hashsize; i++) {
		spin_lock_init(&sctp_port_hashtable[i].lock);
		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
	}

	status = sctp_transport_hashtable_init();
	if (status)
		goto err_thash_alloc;

	pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
		num_entries);

	sctp_sysctl_register();

	INIT_LIST_HEAD(&sctp_address_families);
	sctp_v4_pf_init();
	sctp_v6_pf_init();
	sctp_sched_ops_init();

	status = register_pernet_subsys(&sctp_defaults_ops);
	if (status)
		goto err_register_defaults;

	status = sctp_v4_protosw_init();
	if (status)
		goto err_protosw_init;

	status = sctp_v6_protosw_init();
	if (status)
		goto err_v6_protosw_init;

	status = register_pernet_subsys(&sctp_ctrlsock_ops);
	if (status)
		goto err_register_ctrlsock;

	status = sctp_v4_add_protocol();
	if (status)
		goto err_add_protocol;

	/* Register SCTP with inet6 layer. */
	status = sctp_v6_add_protocol();
	if (status)
		goto err_v6_add_protocol;

	if (sctp_offload_init() < 0)
		pr_crit("%s: Cannot add SCTP protocol offload\n", __func__);

out:
	return status;
err_v6_add_protocol:
	sctp_v4_del_protocol();
err_add_protocol:
	unregister_pernet_subsys(&sctp_ctrlsock_ops);
err_register_ctrlsock:
	sctp_v6_protosw_exit();
err_v6_protosw_init:
	sctp_v4_protosw_exit();
err_protosw_init:
	unregister_pernet_subsys(&sctp_defaults_ops);
err_register_defaults:
	sctp_v6_pf_exit();
	sctp_v4_pf_exit();
	sctp_sysctl_unregister();
	free_pages((unsigned long)sctp_port_hashtable,
		   get_order(sctp_port_hashsize *
			     sizeof(struct sctp_bind_hashbucket)));
err_bhash_alloc:
	sctp_transport_hashtable_destroy();
err_thash_alloc:
	kfree(sctp_ep_hashtable);
err_ehash_alloc:
	percpu_counter_destroy(&sctp_sockets_allocated);
err_percpu_counter_init:
	kmem_cache_destroy(sctp_chunk_cachep);
err_chunk_cachep:
	kmem_cache_destroy(sctp_bucket_cachep);
	goto out;
}

/* Exit handler for the SCTP protocol. */
static __exit void sctp_exit(void)
{
	/* BUG.  This should probably do something useful like clean
	 * up all the remaining associations and all that memory.
	 */

	/* Unregister with inet6/inet layers. */
	sctp_v6_del_protocol();
	sctp_v4_del_protocol();

	unregister_pernet_subsys(&sctp_ctrlsock_ops);

	/* Free protosw registrations */
	sctp_v6_protosw_exit();
	sctp_v4_protosw_exit();

	unregister_pernet_subsys(&sctp_defaults_ops);

	/* Unregister with socket layer. */
	sctp_v6_pf_exit();
	sctp_v4_pf_exit();

	sctp_sysctl_unregister();

	free_pages((unsigned long)sctp_port_hashtable,
		   get_order(sctp_port_hashsize *
			     sizeof(struct sctp_bind_hashbucket)));
	kfree(sctp_ep_hashtable);
	sctp_transport_hashtable_destroy();

	percpu_counter_destroy(&sctp_sockets_allocated);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(sctp_chunk_cachep);
	kmem_cache_destroy(sctp_bucket_cachep);
}

module_init(sctp_init);
module_exit(sctp_exit);

/*
 * __stringify doesn't like enums, so use IPPROTO_SCTP value (132) directly.
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
MODULE_LICENSE("GPL");