// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2002 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This abstraction represents an SCTP endpoint.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@austin.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Dajiang Zhang <dajiang.zhang@nokia.com>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>	/* get_random_bytes() */
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static void sctp_endpoint_bh_rcv(struct work_struct *work);

/*
 * Initialize the base fields of the endpoint structure.
 */
static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
						struct sock *sk,
						gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_shared_key *null_key;
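
	/* Digest scratch space used when verifying the state cookie HMAC;
	 * it is freed again in sctp_endpoint_destroy() below.
	 */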
	ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
	if (!ep->digest)
		return NULL;

	ep->asconf_enable = net->sctp.addip_enable;
	ep->auth_enable = net->sctp.auth_enable;
	if (ep->auth_enable) {
		if (sctp_auth_init(ep, gfp))
			goto nomem;
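		/* RFC 5061 (ADD-IP) requires ASCONF and ASCONF-ACK chunks to
		 * be authenticated, so register both chunk types with the
		 * AUTH machinery whenever both extensions are enabled.
		 */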
		if (ep->asconf_enable) {
			sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
			sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
		}
	}

	/* Initialize the base structure. */
	/* What type of endpoint are we?  */
	ep->base.type = SCTP_EP_TYPE_SOCKET;

	/* Initialize the basic object fields. */
	refcount_set(&ep->base.refcnt, 1);
	ep->base.dead = false;

	/* Create an input queue.  */
	sctp_inq_init(&ep->base.inqueue);

	/* Set its top-half handler */
	sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);

	/* Initialize the bind addr area */
	sctp_bind_addr_init(&ep->base.bind_addr, 0);
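
	/* The bind address area starts out with port 0; the real port is
	 * filled in later when the socket is explicitly bound or autobound.
	 */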

	/* Create the lists of associations.  */
	INIT_LIST_HEAD(&ep->asocs);

	/* Use SCTP specific send buffer space queues.  */
	ep->sndbuf_policy = net->sctp.sndbuf_policy;

	sk->sk_data_ready = sctp_data_ready;
	sk->sk_write_space = sctp_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	/* Get the receive buffer policy for this endpoint */
	ep->rcvbuf_policy = net->sctp.rcvbuf_policy;

	/* Initialize the secret key used with cookie. */
	get_random_bytes(ep->secret_key, sizeof(ep->secret_key));

	/* SCTP-AUTH extensions */
	INIT_LIST_HEAD(&ep->endpoint_shared_keys);
	null_key = sctp_auth_shkey_create(0, gfp);
	if (!null_key)
		goto nomem_shkey;

	list_add(&null_key->key_list, &ep->endpoint_shared_keys);

	/* Add the null key to the endpoint shared keys list and
	 * set the hmacs and chunks pointers.
	 */
	ep->prsctp_enable = net->sctp.prsctp_enable;
	ep->reconf_enable = net->sctp.reconf_enable;
	ep->ecn_enable = net->sctp.ecn_enable;

	/* Remember who we are attached to.  */
	ep->base.sk = sk;
	ep->base.net = sock_net(sk);
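	/* The endpoint pins its socket for its whole lifetime; the matching
	 * sock_put() happens in sctp_endpoint_destroy_rcu().
	 */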
	sock_hold(ep->base.sk);

	return ep;

nomem_shkey:
	sctp_auth_free(ep);
nomem:
	kfree(ep->digest);
	return NULL;
}

/* Create a sctp_endpoint with all that boring stuff initialized.
 * Returns NULL if there isn't enough memory.
 */
struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
{
	struct sctp_endpoint *ep;

	/* Build a local endpoint. */
	ep = kzalloc(sizeof(*ep), gfp);
	if (!ep)
		goto fail;

	if (!sctp_endpoint_init(ep, sk, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(ep);
	return ep;

fail_init:
	kfree(ep);
fail:
	return NULL;
}
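
/* Typical lifecycle (sketch): the socket layer creates the endpoint from
 * sctp_init_sock() via sctp_endpoint_new(sk, GFP_KERNEL) and tears it down
 * from sctp_destroy_sock() via sctp_endpoint_free(), which marks the
 * endpoint dead and drops the initial reference taken above.
 */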

/* Add an association to an endpoint.  */
void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
			    struct sctp_association *asoc)
{
	struct sock *sk = ep->base.sk;

	/* If this is a temporary association, don't bother
	 * since we'll be removing it shortly and don't
	 * want anyone to find it anyway.
	 */
	if (asoc->temp)
		return;

	/* Now just add it to our list of asocs */
	list_add_tail(&asoc->asocs, &ep->asocs);

	/* Increment the backlog value for a TCP-style listening socket. */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		sk_acceptq_added(sk);
}

/* Free the endpoint structure.  Delay cleanup until
 * all users have released their reference count on this structure.
 */
void sctp_endpoint_free(struct sctp_endpoint *ep)
{
	ep->base.dead = true;

	inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);

	/* Unlink this endpoint, so we can't find it again! */
	sctp_unhash_endpoint(ep);

	sctp_endpoint_put(ep);
}

/* Final destructor for endpoint.  */
static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
{
	struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
	struct sock *sk = ep->base.sk;

	sctp_sk(sk)->ep = NULL;
	sock_put(sk);

	kfree(ep);
	SCTP_DBG_OBJCNT_DEC(ep);
}

static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
	struct sock *sk;

	if (unlikely(!ep->base.dead)) {
		WARN(1, "Attempt to destroy undead endpoint %p!\n", ep);
		return;
	}

	/* Free the digest buffer */
	kfree(ep->digest);

	/* SCTP-AUTH: Free up AUTH related data such as shared keys,
	 * chunks and hmacs arrays that were allocated.
	 */
	sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
	sctp_auth_free(ep);

	sctp_inq_free(&ep->base.inqueue);
	sctp_bind_addr_free(&ep->base.bind_addr);

	memset(ep->secret_key, 0, sizeof(ep->secret_key));

	sk = ep->base.sk;
	/* Remove and free the port */
	if (sctp_sk(sk)->bind_hash)
		sctp_put_port(sk);

	call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
}
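
/* Endpoint lookups run under RCU, so the actual kfree() is deferred through
 * call_rcu() above, and sctp_endpoint_hold() below uses
 * refcount_inc_not_zero() so that a lookup racing with the final put simply
 * fails instead of resurrecting a dying endpoint.
 */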

/* Hold a reference to an endpoint. */
int sctp_endpoint_hold(struct sctp_endpoint *ep)
{
	return refcount_inc_not_zero(&ep->base.refcnt);
}

/* Release a reference to an endpoint and clean up if there are
 * no more references.
 */
void sctp_endpoint_put(struct sctp_endpoint *ep)
{
	if (refcount_dec_and_test(&ep->base.refcnt))
		sctp_endpoint_destroy(ep);
}
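
/* Callers that find an endpoint outside the socket lock typically pair the
 * two helpers above like this (sketch; sctp_endpoint_lookup() stands in for
 * whatever hypothetical lookup is used):
 *
 *	rcu_read_lock();
 *	ep = sctp_endpoint_lookup(...);
 *	if (ep && !sctp_endpoint_hold(ep))
 *		ep = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (ep)
 *		sctp_endpoint_put(ep);
 */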

/* Is this the endpoint we are looking for?  */
struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
					     struct net *net,
					     const union sctp_addr *laddr,
					     int dif, int sdif)
{
	int bound_dev_if = READ_ONCE(ep->base.sk->sk_bound_dev_if);
	struct sctp_endpoint *retval = NULL;

	if (net_eq(ep->base.net, net) &&
	    sctp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif) &&
	    (htons(ep->base.bind_addr.port) == laddr->v4.sin_port)) {
		if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
					 sctp_sk(ep->base.sk)))
			retval = ep;
	}

	return retval;
}
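
/* Note: ep->base.bind_addr.port is kept in host byte order while
 * laddr->v4.sin_port is in network byte order, hence the htons() in the
 * port comparison above.
 */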

/* Find the association that goes with this chunk.
 * We lookup the transport from hashtable at first, then get association
 * through t->asoc.
 */
struct sctp_association *sctp_endpoint_lookup_assoc(
	const struct sctp_endpoint *ep,
	const union sctp_addr *paddr,
	struct sctp_transport **transport)
{
	struct sctp_association *asoc = NULL;
	struct sctp_transport *t;

	*transport = NULL;

	/* If the local port is not set, there can't be any associations
	 * on this endpoint.
	 */
	if (!ep->base.bind_addr.port)
		return NULL;

	rcu_read_lock();
	t = sctp_epaddr_lookup_transport(ep, paddr);
	if (!t)
		goto out;

	*transport = t;
	asoc = t->asoc;
out:
	rcu_read_unlock();
	return asoc;
}

/* Look for any peeled off association from the endpoint that matches the
 * given peer address.
 */
bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
				 const union sctp_addr *paddr)
{
	int bound_dev_if = READ_ONCE(ep->base.sk->sk_bound_dev_if);
	struct sctp_sockaddr_entry *addr;
	struct net *net = ep->base.net;
	struct sctp_bind_addr *bp;

	bp = &ep->base.bind_addr;
	/* This function is called with the socket lock held,
	 * so the address_list can not change.
	 */
	list_for_each_entry(addr, &bp->address_list, list) {
		if (sctp_has_association(net, &addr->a, paddr,
					 bound_dev_if, bound_dev_if))
			return true;
	}

	return false;
}

/* Do delayed input processing.  This is scheduled by sctp_rcv().
 * This may be called on BH or task time.
 */
static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
	struct sctp_endpoint *ep =
		container_of(work, struct sctp_endpoint,
			     base.inqueue.immediate);
	struct sctp_association *asoc;
	struct sock *sk;
	struct net *net;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	union sctp_subtype subtype;
	enum sctp_state state;
	int error = 0;
	int first_time = 1;	/* is this the first time through the loop */

	if (ep->base.dead)
		return;

	asoc = NULL;
	inqueue = &ep->base.inqueue;
	sk = ep->base.sk;
	net = sock_net(sk);

	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* If the first chunk in the packet is AUTH, do special
		 * processing specified in Section 6.3 of SCTP-AUTH spec
		 */
		if (first_time && (subtype.chunk == SCTP_CID_AUTH)) {
			struct sctp_chunkhdr *next_hdr;

			next_hdr = sctp_inq_peek(inqueue);
			if (!next_hdr)
				goto normal;

			/* If the next chunk is COOKIE-ECHO, skip the AUTH
			 * chunk while saving a pointer to it so we can do
			 * Authentication later (during cookie-echo
			 * processing).
			 */
			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
				chunk->auth_chunk = skb_clone(chunk->skb,
							      GFP_ATOMIC);
				chunk->auth = 1;
				continue;
			}
		}

normal:
		/* We might have grown an association since last we
		 * looked, so try again.
		 *
		 * This happens when we've just processed our
		 * COOKIE-ECHO chunk.
		 */
		if (NULL == chunk->asoc) {
			asoc = sctp_endpoint_lookup_assoc(ep,
							  sctp_source(chunk),
							  &transport);
			chunk->asoc = asoc;
			chunk->transport = transport;
		}

		state = asoc ? asoc->state : SCTP_STATE_CLOSED;
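
		/* Drop chunk types that are required to arrive inside an
		 * AUTH chunk (for example ASCONF/ASCONF-ACK when AUTH is
		 * enabled) but showed up unauthenticated.
		 */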
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (asoc && sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(ep->base.net, SCTP_MIB_INCTRLCHUNKS);
			if (asoc)
				asoc->stats.ictrlchunks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
				   ep, asoc, chunk, GFP_ATOMIC);

		if (error && chunk)
			chunk->pdiscard = 1;

		/* Check to see if the endpoint is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (!sctp_sk(sk)->ep)
			break;

		if (first_time)
			first_time = 0;
	}
}