/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}
static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}
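
/* tcp_v6_init_seq() hashes the full 4-tuple and tcp_v6_init_ts_off() the
 * address pair, RFC 6528 style: an off-path attacker cannot predict the
 * initial sequence number or the per-connection timestamp offset, because
 * both are derived from a keyed hash over the connection identifiers.
 */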
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
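
/* Example: connect()ing an AF_INET6 socket to the v4-mapped destination
 * ::ffff:192.0.2.1 takes the IPV6_ADDR_MAPPED branch above; the socket is
 * re-pointed at the ipv6_mapped ops and handed to tcp_v4_connect(), so the
 * connection runs as plain IPv4 underneath the IPv6 API.
 */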
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
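
/* Userspace installs per-peer RFC 2385 keys through the TCP_MD5SIG socket
 * option that the helper above parses. A minimal sketch (illustrative
 * userspace code, not part of this file):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that address instead.
 */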
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
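
/* The digest thus covers an RFC 2460-style pseudo-header followed by the
 * TCP header with its checksum field zeroed:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;
 *		__be32		protocol;	// including padding
 *	};
 *
 * so both endpoints must agree on the addresses, segment length and
 * protocol before the MD5 option can validate.
 */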
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup even when sending a RST;
	 * the underlying function uses it to retrieve the network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
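
/* When both options are present the reply carries 12 option bytes for the
 * timestamp block (NOP, NOP, kind 8, length 10, TSval, TSecr) and 20 bytes
 * for MD5 (NOP, NOP, kind 19, length 18, 16-byte digest), matching the
 * TCPOLEN_*_ALIGNED amounts reserved in tot_len above.
 */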
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
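
/* The latched options can then be read back by userspace with
 * getsockopt(fd, IPPROTO_IPV6, IPV6_PKTOPTIONS, ...), which replays them
 * in ancillary-data form. Only the options from the most recently latched
 * packet are kept, per the note above.
 */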
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}
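
/* end_seq counts the payload bytes plus one unit of sequence space for a
 * SYN and one for a FIN, since both flags consume a sequence number; e.g.
 * a bare SYN with no data has end_seq == seq + 1.
 */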
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
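
/* Early demux runs before routing: by finding the established socket at
 * this point, the rx dst cached by inet6_sk_rx_dst_set() can be attached
 * to the skb and the per-packet route lookup skipped entirely.
 */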
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
static struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.early_demux_handler =	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}