/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
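/* Cache the inbound route on the socket so the ESTABLISHED fast path in
 * tcp_v6_do_rcv() can validate incoming packets against the stored dst
 * instead of performing a full route lookup for every segment.
 */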
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}
static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}
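/* Active open: validate the destination address, resolve a route and
 * source address, and start the three-way handshake.  A v4-mapped
 * destination is handed over to tcp_v4_connect() with the af_ops
 * switched to the mapped variants.
 */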
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
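/* Deferred handler for ICMPV6_PKT_TOOBIG: re-check the path MTU and
 * shrink the MSS if the cached value has become too large.
 */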
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
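/* ICMPv6 error handler for TCP: locates the socket from the embedded
 * TCP header and dispatches redirects, PMTU updates and hard errors to
 * it, deferring work when the socket is owned by user context.
 */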
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif
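/* Verify the TCP-MD5 signature option of an incoming segment against the
 * key configured for the peer address; returns true when the segment
 * must be dropped (missing, unexpected or mismatching signature).
 */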
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for RST.
	 * The underlying function will use this to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, NULL, 0,
					   &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb),
					   tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}
*tcp_v6_cookie_check(struct sock
*sk
, struct sk_buff
*skb
)
1004 #ifdef CONFIG_SYN_COOKIES
1005 const struct tcphdr
*th
= tcp_hdr(skb
);
1008 sk
= cookie_v6_check(sk
, skb
);
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false);
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.open		= tcp_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.early_demux_handler =	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}