/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
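
/* The helpers below derive the initial sequence number and timestamp offset
 * for a new connection from the address/port 4-tuple. Conceptually (a rough
 * sketch, not the exact secure_tcpv6_seq() internals) this is
 * isn = keyed_hash(saddr, daddr, sport, dport) + clock, which keeps ISNs
 * unpredictable to off-path attackers while staying monotonic per flow.
 */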
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}
static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}
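
/* tcp_v6_connect() implements connect() for AF_INET6 TCP sockets: it
 * validates the address, resolves flow labels and link-local scope ids,
 * hands v4-mapped destinations over to tcp_v4_connect(), performs route and
 * source-address selection, hashes the socket, and finally sends the SYN
 * via tcp_connect().
 */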
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
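
/* Invoked (possibly deferred from the ICMPv6 handler below) once a Packet
 * Too Big notification has updated tp->mtu_info: refresh the cached route's
 * PMTU and, if the socket's MSS is now too large, shrink it and retransmit
 * the in-flight data.
 */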
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
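
/* ICMPv6 error handler: map an incoming error onto the owning socket (or
 * request socket), handle redirects and Packet Too Big specially, and
 * surface fatal errors to the user through sk_err/sk_err_soft.
 */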
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}
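
/* TCP-MD5 (RFC 2385) support: keys are looked up by peer address, and
 * v4-mapped peers are stored under AF_INET, so a key added through either
 * address family matches the same wire traffic.
 */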
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}
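
/* The MD5 digest covers an IPv6 pseudo-header (RFC 2460 style: source and
 * destination addresses, an upper-layer length, and IPPROTO_TCP), followed
 * by the TCP header with a zeroed checksum, then the payload, and finally
 * the key itself, as assembled by the hash helpers below.
 */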
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
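
/* Fill the IPv6 side of a freshly minted request sock from the incoming
 * SYN: remote/local addresses, the interface for link-local peers, and a
 * reference to the skb when the listener wants its IPv6 packet options.
 */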
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU -
				sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
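
/* Note the mss_clamp arithmetic above: IPV6_MIN_MTU (1280) minus the fixed
 * IPv6 header (40) and TCP header (20) leaves a 1220-byte MSS, so even a
 * minimal-MTU path carries full segments. tcp_v6_send_response() below
 * builds stand-alone ACK/RST segments on the per-netns control socket;
 * option space is laid out by hand, a timestamp costing
 * TCPOLEN_TSTAMP_ALIGNED (12) bytes and an MD5 signature
 * TCPOLEN_MD5SIG_ALIGNED (20) bytes on top of the 20-byte header.
 */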
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when it is for an RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. This does not loosen security:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
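
/* The ACK-side wrappers below all funnel into tcp_v6_send_response() with
 * rst == 0; only the sequence numbers, window, timestamps and keys differ
 * per caller (time-wait sockets vs. request sockets).
 */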
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}
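
/* Main receive entry point for IPv6 TCP segments: validate and checksum
 * the header, look the socket up in the established/listener tables, run
 * the MD5 and xfrm policy checks, then either process the segment directly
 * or queue it to the socket backlog; TIME_WAIT and NEW_SYN_RECV states get
 * their own branches.
 */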
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_filter(sk, skb)) {
			goto discard_and_relse;
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* Fall through to ACK */
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
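
/* Early demux runs at IP receive time, before routing: an established-table
 * lookup (note inet6_iif(), since TCP_SKB_CB has not been filled in yet)
 * lets us attach the socket and its cached dst to the skb and skip that
 * work later in the receive path.
 */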
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
*seq
, struct sock
*sp
, int i
)
1754 const struct in6_addr
*dest
, *src
;
1757 unsigned long timer_expires
;
1758 const struct inet_sock
*inet
= inet_sk(sp
);
1759 const struct tcp_sock
*tp
= tcp_sk(sp
);
1760 const struct inet_connection_sock
*icsk
= inet_csk(sp
);
1761 const struct fastopen_queue
*fastopenq
= &icsk
->icsk_accept_queue
.fastopenq
;
1765 dest
= &sp
->sk_v6_daddr
;
1766 src
= &sp
->sk_v6_rcv_saddr
;
1767 destp
= ntohs(inet
->inet_dport
);
1768 srcp
= ntohs(inet
->inet_sport
);
1770 if (icsk
->icsk_pending
== ICSK_TIME_RETRANS
||
1771 icsk
->icsk_pending
== ICSK_TIME_REO_TIMEOUT
||
1772 icsk
->icsk_pending
== ICSK_TIME_LOSS_PROBE
) {
1774 timer_expires
= icsk
->icsk_timeout
;
1775 } else if (icsk
->icsk_pending
== ICSK_TIME_PROBE0
) {
1777 timer_expires
= icsk
->icsk_timeout
;
1778 } else if (timer_pending(&sp
->sk_timer
)) {
1780 timer_expires
= sp
->sk_timer
.expires
;
1783 timer_expires
= jiffies
;
1786 state
= sk_state_load(sp
);
1787 if (state
== TCP_LISTEN
)
1788 rx_queue
= sp
->sk_ack_backlog
;
1790 /* Because we don't lock the socket,
1791 * we might find a transient negative value.
1793 rx_queue
= max_t(int, tp
->rcv_nxt
- tp
->copied_seq
, 0);
1796 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1797 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1799 src
->s6_addr32
[0], src
->s6_addr32
[1],
1800 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
1801 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
1802 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
1804 tp
->write_seq
- tp
->snd_una
,
1807 jiffies_delta_to_clock_t(timer_expires
- jiffies
),
1808 icsk
->icsk_retransmits
,
1809 from_kuid_munged(seq_user_ns(seq
), sock_i_uid(sp
)),
1810 icsk
->icsk_probes_out
,
1812 refcount_read(&sp
->sk_refcnt
), sp
,
1813 jiffies_to_clock_t(icsk
->icsk_rto
),
1814 jiffies_to_clock_t(icsk
->icsk_ack
.ato
),
1815 (icsk
->icsk_ack
.quick
<< 1) | icsk
->icsk_ack
.pingpong
,
1817 state
== TCP_LISTEN
?
1818 fastopenq
->max_qlen
:
1819 (tcp_in_initial_slowstart(tp
) ? -1 : tp
->snd_ssthresh
)
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}
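
/* For illustration, a (hypothetical) line emitted through the formats above
 * for a listening socket bound to [::1]:8080 would look roughly like:
 *
 *   0: 00000000000000000000000001000000:1F90 00000000000000000000000000000000:0000
 *      0A 00000000:00000000 00:00000000 00000000  1000 0 12345 1 ...
 *
 * i.e. hex-encoded addresses and ports, state (0A == TCP_LISTEN), queue
 * sizes, timer state, uid, inode, refcount and the socket pointer.
 */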
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
static struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.early_demux_handler =  tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}