/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		if (rt->rt6i_node)
			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

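/* Active open.  Validates the destination sockaddr, resolves flow label
 * and link-local scope, routes the flow and picks a source address, then
 * hands off to tcp_connect().  A v4-mapped destination is diverted to
 * tcp_v4_connect() with the af_ops switched to ipv6_mapped.
 */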
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

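/* Apply a new path MTU.  Called directly from tcp_v6_err() when the socket
 * is not owned by user context, or deferred to release_sock() via the
 * TCP_MTU_REDUCED_DEFERRED flag otherwise.
 */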
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

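/* ICMPv6 error handler: map the offending packet back to its socket, then
 * follow redirects, absorb PKT_TOOBIG into tp->mtu_info, or convert the
 * ICMP type/code into a socket error.
 */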
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

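/* Build and transmit a SYN-ACK for a pending connection request, routing
 * it via the request's flow if no destination entry was supplied.
 */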
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

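/* Mix the RFC 2460-style pseudo-header (source address, destination
 * address, upper-layer length, next header = TCP) into an MD5 digest in
 * progress.  The scratch pseudo-header lives in the per-cpu md5sig pool,
 * so no allocation is needed here.
 */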
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
	return false;
}
#endif

static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};

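/* Common worker for RSTs and bare ACKs sent without a full socket: the
 * reply starts as a bare tcphdr and grows by 12 bytes for an aligned
 * timestamp option and 20 bytes for an aligned MD5 option, and is
 * transmitted through the per-netns control socket.
 */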
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even if this is for an RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash for the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

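/* For a segment hitting a listening socket: match it against pending
 * connection requests, then against the established hash (a SYN to a
 * recycled pair), and finally fall back to SYN cookies.
 */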
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req) {
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk || nsk == sk)
			reqsk_put(req);
		return nsk;
	}
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));
	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

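/* Create the child socket once the 3WHS completes.  The first half handles
 * a v4-mapped peer by delegating to tcp_v4_syn_recv_sock(); the second
 * half builds a native IPv6 child and copies options, routes and (if
 * configured) the MD5 key over from the listener.
 */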
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any)

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);

		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

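/* Main receive path, called from the IPv6 stack for every TCP segment:
 * validate the header and checksum, look up the segment's socket, and
 * either process it directly, prequeue it, or push it onto the backlog.
 */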
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw);
			inet_twsk_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

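/* Early demux: opportunistically match an incoming segment to an
 * established socket before routing, so the socket's cached rx dst can be
 * reused for the input path.
 */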
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct request_sock *req, int i, kuid_t uid)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}