/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle all input from the IP layer into SCTP.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Hui Huang <hui.huang@nokia.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 */
#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
#include <linux/time.h> /* For struct timeval */
#include <linux/slab.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
#include <net/net_namespace.h>
#include <linux/rhashtable.h>
/* Forward declarations for internal helpers. */
static int sctp_rcv_ootb(struct sk_buff *);
static struct sctp_association *__sctp_rcv_lookup(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *paddr,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp);
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
					const union sctp_addr *laddr);
static struct sctp_association *__sctp_lookup_association(
					struct net *net,
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt);

static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
/* Calculate the SCTP checksum of an SCTP packet.  */
static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
{
	struct sctphdr *sh = sctp_hdr(skb);
	__le32 cmp = sh->checksum;
	__le32 val = sctp_compute_cksum(skb, 0);

	if (val != cmp) {
		/* CRC failure, dump it. */
		__SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
		return -1;
	}
	return 0;
}
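
/* Illustrative sketch (not part of the original file): per RFC 4960
 * Appendix B, the checksum is CRC32c seeded with ~0, computed over the
 * whole packet with the Checksum field treated as zero, and complemented
 * at the end.  sctp_compute_cksum() does this for a possibly non-linear
 * skb; for a fully linear packet the same calculation boils down to the
 * hypothetical helper below (assumes <linux/crc32c.h>).
 */
static inline __le32 sctp_linear_cksum_sketch(struct sctphdr *sh,
					      unsigned int len)
{
	__le32 old = sh->checksum;	/* value received on the wire */
	u32 crc;

	sh->checksum = 0;		/* field counts as zero while hashing */
	crc = ~crc32c(~(u32)0, sh, len);
	sh->checksum = old;		/* restore the original field */

	return cpu_to_le32(crc);
}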
/*
 * This is the routine which IP calls when receiving an SCTP packet.
 */
int sctp_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct sctp_association *asoc;
	struct sctp_endpoint *ep = NULL;
	struct sctp_ep_common *rcvr;
	struct sctp_transport *transport = NULL;
	struct sctp_chunk *chunk;
	union sctp_addr src;
	union sctp_addr dest;
	int family;
	struct sctp_af *af;
	struct net *net = dev_net(skb->dev);
	bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	__SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
	/* If packet is too small to contain a single chunk, let's not
	 * waste time on it anymore.
	 */
	if (skb->len < sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
		       skb_transport_offset(skb))
		goto discard_it;

	/* If the packet is fragmented and we need to do crc checking,
	 * it's better to just linearize it otherwise crc computing
	 * takes longer.
	 */
	if ((!is_gso && skb_linearize(skb)) ||
	    !pskb_may_pull(skb, sizeof(struct sctphdr)))
		goto discard_it;

	/* Pull up the IP header. */
	__skb_pull(skb, skb_transport_offset(skb));

	skb->csum_valid = 0; /* Previous value not applicable */
	if (skb_csum_unnecessary(skb))
		__skb_decr_checksum_unnecessary(skb);
	else if (!sctp_checksum_disable &&
		 !is_gso &&
		 sctp_rcv_checksum(net, skb) < 0)
		goto discard_it;
	else
		skb->csum_valid = 1;

	__skb_pull(skb, sizeof(struct sctphdr));
	family = ipver2af(ip_hdr(skb)->version);
	af = sctp_get_af_specific(family);
	if (unlikely(!af))
		goto discard_it;
	SCTP_INPUT_CB(skb)->af = af;

	/* Initialize local addresses for lookups. */
	af->from_skb(&src, skb, 1);
	af->from_skb(&dest, skb, 0);
	/* If the packet is to or from a non-unicast address,
	 * silently discard the packet.
	 *
	 * This is not clearly defined in the RFC except in section
	 * 8.4 - OOTB handling.  However, based on the book "Stream Control
	 * Transmission Protocol" 2.1, "It is important to note that the
	 * IP address of an SCTP transport address must be a routable
	 * unicast address.  In other words, IP multicast addresses and
	 * IP broadcast addresses cannot be used in an SCTP transport
	 * address."
	 */
	if (!af->addr_valid(&src, NULL, skb) ||
	    !af->addr_valid(&dest, NULL, skb))
		goto discard_it;

	asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
	if (!asoc)
		ep = __sctp_rcv_lookup_endpoint(net, &dest);

	/* Retrieve the common input handling substructure. */
	rcvr = asoc ? &asoc->base : &ep->base;
	sk = rcvr->sk;

	/*
	 * If a frame arrives on an interface and the receiving socket is
	 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
	 */
	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
		if (transport) {
			sctp_transport_put(transport);
			asoc = NULL;
			transport = NULL;
		} else {
			sctp_endpoint_put(ep);
			ep = NULL;
		}
		sk = net->sctp.ctl_sock;
		ep = sctp_sk(sk)->ep;
		sctp_endpoint_hold(ep);
		rcvr = &ep->base;
	}
	/*
	 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
	 * An SCTP packet is called an "out of the blue" (OOTB)
	 * packet if it is correctly formed, i.e., passed the
	 * receiver's checksum check, but the receiver is not
	 * able to identify the association to which this
	 * packet belongs.
	 */
	if (!asoc) {
		if (sctp_rcv_ootb(skb)) {
			__SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
			goto discard_release;
		}
	}

	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
		goto discard_release;

	if (sk_filter(sk, skb))
		goto discard_release;
	/* Create an SCTP packet structure. */
	chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC);
	if (!chunk)
		goto discard_release;
	SCTP_INPUT_CB(skb)->chunk = chunk;

	/* Remember what endpoint is to handle this packet. */
	chunk->rcvr = rcvr;

	/* Remember the SCTP header. */
	chunk->sctp_hdr = sctp_hdr(skb);

	/* Set the source and destination addresses of the incoming chunk. */
	sctp_init_addrs(chunk, &src, &dest);

	/* Remember where we came from. */
	chunk->transport = transport;
	/* Acquire access to the sock lock. Note: We are safe from other
	 * bottom halves on this lock, but a user may be in the lock too,
	 * so check if it is busy.
	 */
	bh_lock_sock(sk);

	if (sk != rcvr->sk) {
		/* Our cached sk is different from the rcvr->sk.  This is
		 * because migrate()/accept() may have moved the association
		 * to a new socket and released all the sockets.  So now we
		 * are holding a lock on the old socket while the user may
		 * be doing something with the new socket.  Switch our view
		 * of the current sk.
		 */
		bh_unlock_sock(sk);
		sk = rcvr->sk;
		bh_lock_sock(sk);
	}

	if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
		if (sctp_add_backlog(sk, skb)) {
			bh_unlock_sock(sk);
			sctp_chunk_free(chunk);
			skb = NULL; /* sctp_chunk_free already freed the skb */
			goto discard_release;
		}
		__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
	} else {
		__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
	}

	bh_unlock_sock(sk);
	/* Release the asoc/ep ref we took in the lookup calls. */
	if (transport)
		sctp_transport_put(transport);
	else
		sctp_endpoint_put(ep);

	return 0;

discard_it:
	__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
	kfree_skb(skb);
	return 0;

discard_release:
	/* Release the asoc/ep ref we took in the lookup calls. */
	if (transport)
		sctp_transport_put(transport);
	else
		sctp_endpoint_put(ep);

	goto discard_it;
}
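
/* For context: the skbs parked by sctp_add_backlog() above are replayed when
 * the socket owner calls release_sock(), which drains sk->sk_backlog through
 * the protocol's backlog handler; for SCTP that handler is sctp_backlog_rcv()
 * below (registered as .backlog_rcv = sctp_backlog_rcv in sctp_prot).
 */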
/* Process the backlog queue of the socket.  Every skb on
 * the backlog holds a ref on an association or endpoint.
 * We hold this ref throughout the state machine to make
 * sure that the structure we need is still around.
 */
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
	struct sctp_transport *t = chunk->transport;
	struct sctp_ep_common *rcvr = NULL;
	int backloged = 0;

	rcvr = chunk->rcvr;

	/* If the rcvr is dead then the association or endpoint
	 * has been deleted and we can safely drop the chunk
	 * and refs that we are holding.
	 */
	if (rcvr->dead) {
		sctp_chunk_free(chunk);
		goto done;
	}
	if (unlikely(rcvr->sk != sk)) {
		/* In this case, the association moved from one socket to
		 * another.  We are currently sitting on the backlog of the
		 * old socket, so we need to move.
		 * However, since we are here in the process context we
		 * need to make sure that the user doesn't own
		 * the new socket when we process the packet.
		 * If the new socket is user-owned, queue the chunk to the
		 * backlog of the new socket without dropping any refs.
		 * Otherwise, we can safely push the chunk on the inqueue.
		 */
		sk = rcvr->sk;
		local_bh_disable();
		bh_lock_sock(sk);

		if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
				sctp_chunk_free(chunk);
			else
				backloged = 1;
		} else
			sctp_inq_push(inqueue, chunk);

		bh_unlock_sock(sk);
		local_bh_enable();

		/* If the chunk was backlogged again, don't drop refs */
		if (backloged)
			return 0;
	} else {
		if (!sctp_newsk_ready(sk)) {
			if (!sk_add_backlog(sk, skb, sk->sk_rcvbuf))
				return 0;
			sctp_chunk_free(chunk);
		} else
			sctp_inq_push(inqueue, chunk);
	}

done:
	/* Release the refs we took in sctp_add_backlog */
	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
		sctp_transport_put(t);
	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
		sctp_endpoint_put(sctp_ep(rcvr));
	else
		BUG();

	return 0;
}
static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_transport *t = chunk->transport;
	struct sctp_ep_common *rcvr = chunk->rcvr;
	int ret;

	ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
	if (!ret) {
		/* Hold the assoc/ep while hanging on the backlog queue.
		 * This way, we know structures we need will not disappear
		 * from us.
		 */
		if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
			sctp_transport_hold(t);
		else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
			sctp_endpoint_hold(sctp_ep(rcvr));
		else
			BUG();
	}
	return ret;
}
/* Handle icmp frag needed error. */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
			   struct sctp_transport *t, __u32 pmtu)
{
	if (!t || (t->pathmtu <= pmtu))
		return;

	if (sock_owned_by_user(sk)) {
		atomic_set(&t->mtu_info, pmtu);
		asoc->pmtu_pending = 1;
		t->pmtu_pending = 1;
		return;
	}

	if (!(t->param_flags & SPP_PMTUD_ENABLE))
		/* We can't allow retransmitting in such case, as the
		 * retransmission would be sized just as before, and thus we
		 * would get another icmp, and retransmit again.
		 */
		return;

	/* Update transport's view of the MTU.  Return if no update was
	 * needed.  If an update wasn't needed/possible, it also doesn't
	 * make sense to try to retransmit now.
	 */
	if (!sctp_transport_update_pmtu(t, pmtu))
		return;

	/* Update association pmtu. */
	sctp_assoc_sync_pmtu(asoc);

	/* Retransmit with the new pmtu setting. */
	sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
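
/* Illustrative sketch (not part of the original file): the sock-owned branch
 * above only records the new MTU in t->mtu_info and raises pmtu_pending; the
 * actual update has to be applied later by code running under the sock lock.
 * A minimal consumer, with a made-up helper name, could look like this:
 */
static inline void sctp_apply_pending_pmtu_sketch(struct sctp_association *asoc,
						  struct sctp_transport *t)
{
	u32 pmtu = atomic_read(&t->mtu_info);

	if (!asoc->pmtu_pending)
		return;

	asoc->pmtu_pending = 0;
	if (sctp_transport_update_pmtu(t, pmtu))
		sctp_assoc_sync_pmtu(asoc);
}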
void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
			struct sk_buff *skb)
{
	struct dst_entry *dst;

	if (sock_owned_by_user(sk) || !t)
		return;
	dst = sctp_transport_dst_check(t);
	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
 *
 * ICMP8) If the ICMP code is an "Unrecognized next header type encountered"
 *        or a "Protocol Unreachable" treat this message as an abort
 *        with the T bit set.
 *
 * This function sends an event to the state machine, which will abort the
 * association.
 */
void sctp_icmp_proto_unreachable(struct sock *sk,
				 struct sctp_association *asoc,
				 struct sctp_transport *t)
{
	if (sock_owned_by_user(sk)) {
		if (timer_pending(&t->proto_unreach_timer))
			return;
		else {
			if (!mod_timer(&t->proto_unreach_timer,
				       jiffies + (HZ/20)))
				sctp_association_hold(asoc);
		}
	} else {
		struct net *net = sock_net(sk);

		pr_debug("%s: unrecognized next header type "
			 "encountered!\n", __func__);

		if (del_timer(&t->proto_unreach_timer))
			sctp_association_put(asoc);

		sctp_do_sm(net, SCTP_EVENT_T_OTHER,
			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
			   asoc->state, asoc->ep, asoc, t,
			   GFP_ATOMIC);
	}
}
/* Common lookup code for icmp/icmpv6 error handler. */
struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
			     struct sctphdr *sctphdr,
			     struct sctp_association **app,
			     struct sctp_transport **tpp)
{
	struct sctp_init_chunk *chunkhdr, _chunkhdr;
	union sctp_addr saddr;
	union sctp_addr daddr;
	struct sctp_af *af;
	struct sock *sk = NULL;
	struct sctp_association *asoc;
	struct sctp_transport *transport = NULL;
	__u32 vtag = ntohl(sctphdr->vtag);

	*app = NULL; *tpp = NULL;

	af = sctp_get_af_specific(family);
	if (unlikely(!af)) {
		return NULL;
	}

	/* Initialize local addresses for lookups. */
	af->from_skb(&saddr, skb, 1);
	af->from_skb(&daddr, skb, 0);
	/* Look for an association that matches the incoming ICMP error
	 * packet.
	 */
	asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
	if (!asoc)
		return NULL;

	sk = asoc->base.sk;

	/* RFC 4960, Appendix C. ICMP Handling
	 *
	 * ICMP6) An implementation MUST validate that the Verification Tag
	 * contained in the ICMP message matches the Verification Tag of
	 * the peer.  If the Verification Tag is not 0 and does NOT
	 * match, discard the ICMP message.  If it is 0 and the ICMP
	 * message contains enough bytes to verify that the chunk type is
	 * an INIT chunk and that the Initiate Tag matches the tag of the
	 * peer, continue with ICMP7.  If the ICMP message is too short
	 * or the chunk type or the Initiate Tag does not match, silently
	 * discard the packet.
	 */
	if (vtag == 0) {
		/* chunk header + first 4 octets of init header */
		chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
					      sizeof(struct sctphdr),
					      sizeof(struct sctp_chunkhdr) +
					      sizeof(__be32), &_chunkhdr);
		if (!chunkhdr ||
		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
			goto out;

	} else if (vtag != asoc->c.peer_vtag) {
		goto out;
	}

	bh_lock_sock(sk);

	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	*app = asoc;
	*tpp = transport;
	return sk;

out:
	sctp_transport_put(transport);
	return NULL;
}
/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
{
	bh_unlock_sock(sk);
	sctp_transport_put(t);
}
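
/* Usage pattern (illustrative): an address-family ICMP error handler brackets
 * its work with the two helpers above, exactly as sctp_v4_err() does below:
 *
 *	sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &t);
 *	if (!sk)
 *		return;
 *	... act on the error while the sock lock is held ...
 *	sctp_err_finish(sk, t);
 */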
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the sctp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic".  When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void sctp_v4_err(struct sk_buff *skb, __u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const int ihlen = iph->ihl * 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	struct inet_sock *inet;
	__u16 saveip, savesctp;
	int err;
	struct net *net = dev_net(skb->dev);

	/* Fix up skb to look at the embedded net header. */
	saveip = skb->network_header;
	savesctp = skb->transport_header;
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ihlen);
	sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
	/* Put back, the original values. */
	skb->network_header = saveip;
	skb->transport_header = savesctp;
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	/* Warning:  The sock lock is held.  Remember to call
	 * sctp_err_finish!
	 */

	switch (type) {
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out_unlock;

		/* PMTU discovery (RFC1191) */
		if (ICMP_FRAG_NEEDED == code) {
			sctp_icmp_frag_needed(sk, asoc, transport,
					      SCTP_TRUNC4(info));
			goto out_unlock;
		}

		if (ICMP_PROT_UNREACH == code) {
			sctp_icmp_proto_unreachable(sk, asoc,
						    transport);
			goto out_unlock;
		}
		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		/* Ignore any time exceeded errors due to fragment reassembly
		 * timeouts.
		 */
		if (ICMP_EXC_FRAGTIME == code)
			goto out_unlock;

		err = EHOSTUNREACH;
		break;
	case ICMP_REDIRECT:
		sctp_icmp_redirect(sk, transport, skb);
		/* Fall through to out_unlock. */
	default:
		goto out_unlock;
	}

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out_unlock:
	sctp_err_finish(sk, transport);
}
/*
 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 *
 * This function scans all the chunks in the OOTB packet to determine if
 * the packet should be discarded right away.  If a response might be needed
 * for this packet, or, if further processing is possible, the packet will
 * be queued to a proper inqueue for the next phase of handling.
 *
 * Output:
 * Return 0 - If further processing is needed.
 * Return 1 - If the packet can be discarded right away.
 */
static int sctp_rcv_ootb(struct sk_buff *skb)
{
	struct sctp_chunkhdr *ch, _ch;
	int ch_end, offset = 0;

	/* Scan through all the chunks in the packet.  */
	do {
		/* Make sure we have at least the header there */
		if (offset + sizeof(_ch) > skb->len)
			break;

		ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);

		/* Break out if chunk length is less than minimal. */
		if (ntohs(ch->length) < sizeof(_ch))
			break;

		ch_end = offset + SCTP_PAD4(ntohs(ch->length));
		if (ch_end > skb->len)
			break;

		/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
		 * receiver MUST silently discard the OOTB packet and take no
		 * further action.
		 */
		if (SCTP_CID_ABORT == ch->type)
			goto discard;

		/* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
		 * chunk, the receiver should silently discard the packet
		 * and take no further action.
		 */
		if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
			goto discard;

		/* RFC 4460, 2.11.2
		 * This will discard packets with INIT chunk bundled as
		 * subsequent chunks in the packet.  When INIT is first,
		 * the normal INIT processing will discard the chunk.
		 */
		if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
			goto discard;

		offset = ch_end;
	} while (ch_end < skb->len);

	return 0;

discard:
	return 1;
}
/* Insert endpoint into the hash table.  */
static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	struct net *net = sock_net(ep->base.sk);
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	epb = &ep->base;
	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
	head = &sctp_ep_hashtable[epb->hashent];

	write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	write_unlock(&head->lock);
}

/* Add an endpoint to the hash. Local BH-safe. */
void sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	local_bh_disable();
	__sctp_hash_endpoint(ep);
	local_bh_enable();
}
/* Remove endpoint from the hash table.  */
static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	struct net *net = sock_net(ep->base.sk);
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;

	epb = &ep->base;
	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);

	head = &sctp_ep_hashtable[epb->hashent];

	write_lock(&head->lock);
	hlist_del_init(&epb->node);
	write_unlock(&head->lock);
}

/* Remove endpoint from the hash.  Local BH-safe. */
void sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	local_bh_disable();
	__sctp_unhash_endpoint(ep);
	local_bh_enable();
}
/* Look up an endpoint. */
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
					const union sctp_addr *laddr)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_endpoint *ep;
	int hash;

	hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
	head = &sctp_ep_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, &head->chain) {
		ep = sctp_ep(epb);
		if (sctp_endpoint_is_match(ep, net, laddr))
			goto hit;
	}

	ep = sctp_sk(net->sctp.ctl_sock)->ep;

hit:
	sctp_endpoint_hold(ep);
	read_unlock(&head->lock);
	return ep;
}
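
/* Note that when no bound endpoint matches, the lookup above falls back to
 * the per-net control socket's endpoint rather than returning NULL, so
 * sctp_rcv() always gets an rcvr to charge OOTB packets against.
 */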
/* rhashtable for transport */
struct sctp_hash_cmp_arg {
	const union sctp_addr	*paddr;
	const struct net	*net;
	__be16			lport;
};

static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	struct sctp_transport *t = (struct sctp_transport *)ptr;
	const struct sctp_hash_cmp_arg *x = arg->key;
	int err = 1;

	if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
		return err;
	if (!sctp_transport_hold(t))
		return err;

	if (!net_eq(t->asoc->base.net, x->net))
		goto out;
	if (x->lport != htons(t->asoc->base.bind_addr.port))
		goto out;

	err = 0;
out:
	sctp_transport_put(t);

	return err;
}
static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct sctp_transport *t = data;
	const union sctp_addr *paddr = &t->ipaddr;
	const struct net *net = t->asoc->base.net;
	__be16 lport = htons(t->asoc->base.bind_addr.port);
	__u32 addr;

	if (paddr->sa.sa_family == AF_INET6)
		addr = jhash(&paddr->v6.sin6_addr, 16, seed);
	else
		addr = (__force __u32)paddr->v4.sin_addr.s_addr;

	return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
			    (__force __u32)lport, net_hash_mix(net), seed);
}
static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
{
	const struct sctp_hash_cmp_arg *x = data;
	const union sctp_addr *paddr = x->paddr;
	const struct net *net = x->net;
	__be16 lport = x->lport;
	__u32 addr;

	if (paddr->sa.sa_family == AF_INET6)
		addr = jhash(&paddr->v6.sin6_addr, 16, seed);
	else
		addr = (__force __u32)paddr->v4.sin_addr.s_addr;

	return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
			    (__force __u32)lport, net_hash_mix(net), seed);
}
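
/* Both helpers above must hash a stored transport and a lookup key to the
 * same value: the peer address contributes one 32-bit word, the two 16-bit
 * ports are packed into a second word (peer port in the high half, local
 * port in the low half, kept in their raw wire representation), and
 * net_hash_mix(net) supplies the third word fed to jhash_3words().
 * obj_cmpfn then settles exact equality for entries that land in the same
 * bucket.
 */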
static const struct rhashtable_params sctp_hash_params = {
	.head_offset		= offsetof(struct sctp_transport, node),
	.hashfn			= sctp_hash_key,
	.obj_hashfn		= sctp_hash_obj,
	.obj_cmpfn		= sctp_hash_cmp,
	.automatic_shrinking	= true,
};
int sctp_transport_hashtable_init(void)
{
	return rhltable_init(&sctp_transport_hashtable, &sctp_hash_params);
}

void sctp_transport_hashtable_destroy(void)
{
	rhltable_destroy(&sctp_transport_hashtable);
}
int sctp_hash_transport(struct sctp_transport *t)
{
	struct sctp_transport *transport;
	struct rhlist_head *tmp, *list;
	struct sctp_hash_cmp_arg arg;
	int err = 0;

	if (t->asoc->temp)
		return 0;

	arg.net   = sock_net(t->asoc->base.sk);
	arg.paddr = &t->ipaddr;
	arg.lport = htons(t->asoc->base.bind_addr.port);

	rcu_read_lock();
	list = rhltable_lookup(&sctp_transport_hashtable, &arg,
			       sctp_hash_params);

	rhl_for_each_entry_rcu(transport, tmp, list, node)
		if (transport->asoc->ep == t->asoc->ep) {
			rcu_read_unlock();
			return -EEXIST;
		}
	rcu_read_unlock();

	err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
				  &t->node, sctp_hash_params);
	if (err)
		pr_err_once("insert transport fail, errno %d\n", err);

	return err;
}
void sctp_unhash_transport(struct sctp_transport *t)
{
	if (t->asoc->temp)
		return;

	rhltable_remove(&sctp_transport_hashtable, &t->node,
			sctp_hash_params);
}
/* return a transport with holding it */
struct sctp_transport *sctp_addrs_lookup_transport(
				struct net *net,
				const union sctp_addr *laddr,
				const union sctp_addr *paddr)
{
	struct rhlist_head *tmp, *list;
	struct sctp_transport *t;
	struct sctp_hash_cmp_arg arg = {
		.paddr = paddr,
		.net   = net,
		.lport = laddr->v4.sin_port,
	};

	list = rhltable_lookup(&sctp_transport_hashtable, &arg,
			       sctp_hash_params);

	rhl_for_each_entry_rcu(t, tmp, list, node) {
		if (!sctp_transport_hold(t))
			continue;

		if (sctp_bind_addr_match(&t->asoc->base.bind_addr,
					 laddr, sctp_sk(t->asoc->base.sk)))
			return t;
		sctp_transport_put(t);
	}

	return NULL;
}
/* return a transport without holding it, as it's only used under sock lock */
struct sctp_transport *sctp_epaddr_lookup_transport(
				const struct sctp_endpoint *ep,
				const union sctp_addr *paddr)
{
	struct net *net = sock_net(ep->base.sk);
	struct rhlist_head *tmp, *list;
	struct sctp_transport *t;
	struct sctp_hash_cmp_arg arg = {
		.paddr = paddr,
		.net   = net,
		.lport = htons(ep->base.bind_addr.port),
	};

	list = rhltable_lookup(&sctp_transport_hashtable, &arg,
			       sctp_hash_params);

	rhl_for_each_entry_rcu(t, tmp, list, node)
		if (ep == t->asoc->ep)
			return t;

	return NULL;
}
/* Look up an association. */
static struct sctp_association *__sctp_lookup_association(
					struct net *net,
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt)
{
	struct sctp_transport *t;
	struct sctp_association *asoc = NULL;

	t = sctp_addrs_lookup_transport(net, local, peer);
	if (!t)
		goto out;

	asoc = t->asoc;
	*pt = t;

out:
	return asoc;
}
/* Look up an association.  Protected by RCU read lock. */
struct sctp_association *sctp_lookup_association(struct net *net,
						 const union sctp_addr *laddr,
						 const union sctp_addr *paddr,
						 struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	rcu_read_lock();
	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
	rcu_read_unlock();

	return asoc;
}
/* Is there an association matching the given local and peer addresses? */
bool sctp_has_association(struct net *net,
			  const union sctp_addr *laddr,
			  const union sctp_addr *paddr)
{
	struct sctp_transport *transport;

	if (sctp_lookup_association(net, laddr, paddr, &transport)) {
		sctp_transport_put(transport);
		return true;
	}

	return false;
}
/*
 * SCTP Implementors Guide, 2.18 Handling of address
 * parameters within the INIT or INIT-ACK.
 *
 * D) When searching for a matching TCB upon reception of an INIT
 *    or INIT-ACK chunk the receiver SHOULD use not only the
 *    source address of the packet (containing the INIT or
 *    INIT-ACK) but the receiver SHOULD also use all valid
 *    address parameters contained within the chunk.
 *
 * 2.18.3 Solution description
 *
 * This new text clearly specifies to an implementor the need
 * to look within the INIT or INIT-ACK.  Any implementation that
 * does not do this, may not be able to establish associations
 * in certain circumstances.
 */
static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
	struct sk_buff *skb,
	const union sctp_addr *laddr, struct sctp_transport **transportp)
{
	struct sctp_association *asoc;
	union sctp_addr addr;
	union sctp_addr *paddr = &addr;
	struct sctphdr *sh = sctp_hdr(skb);
	union sctp_params params;
	struct sctp_init_chunk *init;
	struct sctp_af *af;

	/*
	 * This code will NOT touch anything inside the chunk--it is
	 * strictly READ-ONLY.
	 *
	 * RFC 2960 3  SCTP packet Format
	 *
	 * Multiple chunks can be bundled into one SCTP packet up to
	 * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
	 * COMPLETE chunks.  These chunks MUST NOT be bundled with any
	 * other chunk in a packet.  See Section 6.10 for more details
	 * on chunk bundling.
	 */

	/* Find the start of the TLVs and the end of the chunk.  This is
	 * the region we search for address parameters.
	 */
	init = (struct sctp_init_chunk *)skb->data;

	/* Walk the parameters looking for embedded addresses. */
	sctp_walk_params(params, init, init_hdr.params) {

		/* Note: Ignoring hostname addresses. */
		af = sctp_get_af_specific(param_type2af(params.p->type));
		if (!af)
			continue;

		af->from_addr_param(paddr, params.addr, sh->source, 0);

		asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
		if (asoc)
			return asoc;
	}

	return NULL;
}
/* ADD-IP, Section 5.2
 * When an endpoint receives an ASCONF Chunk from the remote peer
 * special procedures may be needed to identify the association the
 * ASCONF Chunk is associated with.  To properly find the association
 * the following procedures SHOULD be followed:
 *
 * D2) If the association is not found, use the address found in the
 * Address Parameter TLV combined with the port number found in the
 * SCTP common header.  If found proceed to rule D4.
 *
 * D2-ext) If more than one ASCONF Chunks are packed together, use the
 * address found in the ASCONF Address Parameter TLV of each of the
 * subsequent ASCONF Chunks.  If found, proceed to rule D4.
 */
static struct sctp_association *__sctp_rcv_asconf_lookup(
					struct net *net,
					struct sctp_chunkhdr *ch,
					const union sctp_addr *laddr,
					__be16 peer_port,
					struct sctp_transport **transportp)
{
	struct sctp_addip_chunk *asconf = (struct sctp_addip_chunk *)ch;
	struct sctp_af *af;
	union sctp_addr_param *param;
	union sctp_addr paddr;

	/* Skip over the ADDIP header and find the Address parameter */
	param = (union sctp_addr_param *)(asconf + 1);

	af = sctp_get_af_specific(param_type2af(param->p.type));
	if (unlikely(!af))
		return NULL;

	af->from_addr_param(&paddr, param, peer_port, 0);

	return __sctp_lookup_association(net, laddr, &paddr, transportp);
}
/* SCTP-AUTH, Section 6.3:
 *    If the receiver does not find a STCB for a packet containing an AUTH
 *    chunk as the first chunk and not a COOKIE-ECHO chunk as the second
 *    chunk, it MUST use the chunks after the AUTH chunk to look up an existing
 *    association.
 *
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 */
static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc = NULL;
	struct sctp_chunkhdr *ch;
	int have_auth = 0;
	unsigned int chunk_num = 1;
	__u8 *ch_end;

	/* Walk through the chunks looking for AUTH or ASCONF chunks
	 * to help us find the association.
	 */
	ch = (struct sctp_chunkhdr *)skb->data;
	do {
		/* Break out if chunk length is less than minimal. */
		if (ntohs(ch->length) < sizeof(*ch))
			break;

		ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		switch (ch->type) {
		case SCTP_CID_AUTH:
			have_auth = chunk_num;
			break;

		case SCTP_CID_COOKIE_ECHO:
			/* If a packet arrives containing an AUTH chunk as
			 * a first chunk, a COOKIE-ECHO chunk as the second
			 * chunk, and possibly more chunks after them, and
			 * the receiver does not have an STCB for that
			 * packet, then authentication is based on
			 * the contents of the COOKIE-ECHO chunk.
			 */
			if (have_auth == 1 && chunk_num == 2)
				return NULL;
			break;

		case SCTP_CID_ASCONF:
			if (have_auth || net->sctp.addip_noauth)
				asoc = __sctp_rcv_asconf_lookup(
						net, ch, laddr,
						sctp_hdr(skb)->source,
						transportp);
			if (asoc)
				return asoc;
			break;

		default:
			break;
		}

		ch = (struct sctp_chunkhdr *)ch_end;
		chunk_num++;
	} while (ch_end < skb_tail_pointer(skb));

	return asoc;
}
/*
 * There are circumstances when we need to look inside the SCTP packet
 * for information to help us find the association.  Examples
 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
 * chunk.
 */
static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_chunkhdr *ch;

	/* We do not allow GSO frames here as we need to linearize and
	 * then cannot guarantee frame boundaries.  This shouldn't be an
	 * issue as packets hitting this are mostly INIT or INIT-ACK and
	 * those cannot be on GSO-style anyway.
	 */
	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return NULL;

	ch = (struct sctp_chunkhdr *)skb->data;

	/* The code below will attempt to walk the chunk and extract
	 * parameter information.  Before we do that, we need to verify
	 * that the chunk length doesn't cause overflow.  Otherwise, we'll
	 * walk off the end.
	 */
	if (SCTP_PAD4(ntohs(ch->length)) > skb->len)
		return NULL;

	/* If this is INIT/INIT-ACK look inside the chunk too. */
	if (ch->type == SCTP_CID_INIT || ch->type == SCTP_CID_INIT_ACK)
		return __sctp_rcv_init_lookup(net, skb, laddr, transportp);

	return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
}
/* Lookup an association for an inbound skb. */
static struct sctp_association *__sctp_rcv_lookup(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *paddr,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
	if (asoc)
		goto out;

	/* Further lookup for INIT/INIT-ACK packets.
	 * SCTP Implementors Guide, 2.18 Handling of address
	 * parameters within the INIT or INIT-ACK.
	 */
	asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
	if (asoc)
		goto out;

	if (paddr->sa.sa_family == AF_INET)
		pr_debug("sctp: asoc not found for src:%pI4:%d dst:%pI4:%d\n",
			 &laddr->v4.sin_addr, ntohs(laddr->v4.sin_port),
			 &paddr->v4.sin_addr, ntohs(paddr->v4.sin_port));
	else
		pr_debug("sctp: asoc not found for src:%pI6:%d dst:%pI6:%d\n",
			 &laddr->v6.sin6_addr, ntohs(laddr->v6.sin6_port),
			 &paddr->v6.sin6_addr, ntohs(paddr->v6.sin6_port));

out:
	return asoc;
}